UE4 Audio Spectrum to Curve Conversion Tool

I needed audio visualization for a work project. Niagara can visualize audio, but it only reacts to the global audio output, and I needed the data in Blueprints, so I wanted a tool that converts a sound's frequency spectrum into curve assets that Blueprints can read. UE actually ships with such a conversion utility (the SoundVisualizations module), but for some reason it is private and cannot be used from outside the module, so I copied the code out and extended it.
The tool may occasionally crash (an issue in the underlying library it uses); if that happens, just try again a few times.

Calling the last function, GetSoundAmplitudeAndFrequencyCurve, converts the spectrum data into FloatCurve (UCurveFloat) assets.

The other parameters are self-explanatory, but AmplitudeBuckets and SpectrumWidth deserve a closer look. Their literal meaning is explained in the code comments; in practice they determine how many keys the resulting curves contain. The curves produced from GetAmplitude and CalculateFrequencySpectrum use time as the X axis and amplitude or frequency as the Y axis, while the combined curve produced by GetSoundAmplitudeAndFrequencyCurve uses frequency as the X axis and amplitude as the Y axis; its key count is the smaller of the two, so it is best to pass the same value for both parameters, as sketched right below.
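A minimal call sketch, for illustration only: it assumes the class shown further down has been compiled into an editor module and that the function is reachable from C++ (the header declares it under protected:, so a direct C++ call like this assumes it has been moved to a public: section). The asset path and bucket count are placeholders, not from the original post.

// Illustrative only; the sound asset path and bucket count are placeholders.
#include "SoundVisualizationStaticsTool.h"
#include "Sound/SoundWave.h"

static void GenerateCurvesForSound()
{
	// Load the SoundWave to analyze (hypothetical asset path).
	USoundWave* Wave = LoadObject<USoundWave>(nullptr, TEXT("/Game/Audio/SW_Music.SW_Music"));
	if (!Wave)
	{
		return;
	}

	// Use the same value for AmplitudeBuckets and SpectrumWidth so the combined
	// frequency/amplitude curve keeps every key (its key count is the minimum of the two).
	const int32 Buckets = 256;
	USoundVisualizationStaticsTool::GetSoundAmplitudeAndFrequencyCurve(
		Wave,
		/*Channel*/ 0,
		/*StartTime*/ 0.f,
		/*TimeLength*/ Wave->Duration,
		/*AmplitudeBuckets*/ Buckets,
		/*SpectrumWidth*/ Buckets,
		/*Folder*/ TEXT("SoundData"));
}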

[Figure] Frequency plot: X = time, Y = frequency
[Figure] Amplitude plot: X = time, Y = amplitude
[Figure] Spectrum plot: X = frequency, Y = amplitude

Once you have the curves, the visualization flow during playback should roughly be: play the effect and the audio in sync, look up the frequency and amplitude for the current playback time, split the frequency range into bands, and display each band's amplitude for that band. At least, that is roughly how I understand it; a rough sketch follows.
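A rough runtime sketch of that flow, assuming the generated curves have already been loaded and the caller tracks playback time; the band count, frequency range, and function name are all made up for illustration.

// Rough sketch of the playback-time sampling described above (all names hypothetical).
#include "Curves/CurveFloat.h"

void SampleCurvesAtPlaybackTime(UCurveFloat* AmpCurve, UCurveFloat* FreqCurve, float PlaybackTime)
{
	if (!AmpCurve || !FreqCurve)
	{
		return;
	}

	// Both time-based curves use playback time as the X axis, so evaluating them at
	// the same time yields the amplitude and frequency for the current moment.
	const float Amplitude = AmpCurve->GetFloatValue(PlaybackTime);
	const float Frequency = FreqCurve->GetFloatValue(PlaybackTime);

	// Map the frequency into one of a fixed number of bands and drive that band's
	// visual (e.g. a bar's height) with the amplitude.
	const float MaxFrequency = 20000.f; // assumed display range
	const int32 NumBands = 32;          // assumed number of bars
	const int32 Band = FMath::Clamp(static_cast<int32>(Frequency / MaxFrequency * NumBands), 0, NumBands - 1);

	// ... update the visuals for Band using Amplitude here ...
}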

Finally, if calling the function again produces an empty curve, just delete the old asset and regenerate it (see also the save sketch after the source listing below).

When using the code, note that extra module dependencies are required; the editor module's Build.cs looks like this:

// Fill out your copyright notice in the Description page of Project Settings.

using UnrealBuildTool;

public class MyProjectEditor : ModuleRules
{
	public MyProjectEditor(ReadOnlyTargetRules Target) : base(Target)
	{
		PCHUsage = PCHUsageMode.UseExplicitOrSharedPCHs;
	
		PublicDependencyModuleNames.AddRange(new string[] { "Core", "CoreUObject", "Engine", "InputCore", "MyProject"});

		PrivateDependencyModuleNames.AddRange(new string[]
		{
			"AssetRegistry", "UnrealEd", "GameProjectGeneration", "ContentBrowser", "EditorScriptingUtilities",
			"SoundVisualizations"
		});
		
		if (Target.Platform == UnrealTargetPlatform.Win64 || Target.Platform == UnrealTargetPlatform.Win32)
		{
			// VS2015 updated some of the CRT definitions but not all of the Windows SDK has been updated to match.
			// Microsoft provides this shim library to enable building with VS2015 until they fix everything up.
			//@todo: remove when no longer needed (no other code changes should be necessary).
			if (Target.WindowsPlatform.bNeedsLegacyStdioDefinitionsLib)
			{
				PublicSystemLibraries.Add("legacy_stdio_definitions.lib");
			}
		}

		AddEngineThirdPartyPrivateStaticDependencies(Target, "Kiss_FFT");

		// Uncomment if you are using Slate UI
		// PrivateDependencyModuleNames.AddRange(new string[] { "Slate", "SlateCore" });
		
		// Uncomment if you are using online features
		// PrivateDependencyModuleNames.Add("OnlineSubsystem");

		// To include OnlineSubsystemSteam, add it to the plugins section in your uproject file with the Enabled attribute set to true
	}
}

// Fill out your copyright notice in the Description page of Project Settings.

#pragma once

#include "CoreMinimal.h"
#include "Kismet/BlueprintFunctionLibrary.h"
#include "SoundVisualizationStaticsTool.generated.h"

/**
 * Editor-only Blueprint function library, copied from the engine's SoundVisualizationStatics
 * and extended to bake a SoundWave's amplitude and frequency spectrum into CurveFloat assets.
 */
UCLASS()
class USoundVisualizationStaticsTool : public UBlueprintFunctionLibrary
{
	GENERATED_BODY()

protected:
	static void CalculateFrequencySpectrum(USoundWave* SoundWave, const bool bSplitChannels, const float StartTime, const float TimeLength, const int32 SpectrumWidth, TArray< TArray<float> >& OutSpectrums);

	/** Calculates the frequency spectrum for a window of time for the SoundWave
	 * @param SoundWave - The wave to generate the spectrum for
	 * @param Channel - The channel of the sound to calculate.  Specify 0 to combine channels together
	 * @param StartTime - The beginning of the window to calculate the spectrum of
	 * @param TimeLength - The duration of the window to calculate the spectrum of
	 * @param SpectrumWidth - How wide the spectrum is.  The total samples in the window are divided evenly across the spectrum width.
	 * @return OutSpectrum - The resulting spectrum
	 */
	UFUNCTION(BlueprintCallable, Category="SoundVisualization")
	static void CalculateFrequencySpectrum(USoundWave* SoundWave, int32 Channel, float StartTime, float TimeLength, int32 SpectrumWidth, TArray<float>& OutSpectrum);

	static void GetAmplitude(USoundWave* SoundWave, const bool bSplitChannels, const float StartTime, const float TimeLength, const int32 AmplitudeBuckets, TArray< TArray<float> >& OutAmplitudes);

	/** Gathers the amplitude of the wave data for a window of time for the SoundWave
	 * @param SoundWave - The wave to get samples from
	 * @param Channel - The channel of the sound to get.  Specify 0 to combine channels together
	 * @param StartTime - The beginning of the window to get the amplitude from
	 * @param TimeLength - The duration of the window to get the amplitude from
	 * @param AmplitudeBuckets - How many samples to divide the data in to.  The amplitude is averaged from the wave samples for each bucket
	 * @return OutAmplitudes - The resulting amplitudes
	 */
	UFUNCTION(BlueprintCallable, Category="SoundVisualization")
	static void GetAmplitude(USoundWave* SoundWave, int32 Channel, float StartTime, float TimeLength, int32 AmplitudeBuckets, TArray<float>& OutAmplitudes);

	UFUNCTION(BlueprintCallable, Category="SoundVisualization")
	static bool GetSoundAmplitudeAndFrequencyCurve(USoundWave* SoundWave, int32 Channel, float StartTime, float TimeLength, int32 AmplitudeBuckets, int32 SpectrumWidth, FString Folder = "SoundData");
};

// Fill out your copyright notice in the Description page of Project Settings.


#include "SoundVisualizationStaticsTool.h"

#include <functional>

#include "Audio.h"
#include "ContentBrowserModule.h"
#include "IContentBrowserSingleton.h"
#include "AssetRegistry/AssetRegistryModule.h"
#include "Factories/CurveFactory.h"
#include "Sound/SoundWave.h"
#include "Kiss_FFT/kiss_fft129/tools/kiss_fftnd.h"
#include "Kiss_FFT/kiss_fft129/kiss_fft.h"
// Copyright Epic Games, Inc. All Rights Reserved.

//////////////////////////////////////////////////////////////////////////
// USoundVisualizationStatics

DEFINE_LOG_CATEGORY_STATIC(LogSoundVisualization, Log, All);

void USoundVisualizationStaticsTool::GetAmplitude(USoundWave* SoundWave, int32 Channel, float StartTime, float TimeLength, int32 AmplitudeBuckets, TArray<float>& OutAmplitudes)
{
	OutAmplitudes.Empty();

#if WITH_EDITORONLY_DATA
	if (SoundWave)
	{
		if (Channel >= 0)
		{
			TArray< TArray<float> > Amplitudes;

			GetAmplitude(SoundWave, (Channel != 0), StartTime, TimeLength, AmplitudeBuckets, Amplitudes);

			if(Channel == 0)
			{
				OutAmplitudes = Amplitudes[0];
			}
			else if (Channel <= Amplitudes.Num())
			{
				OutAmplitudes = Amplitudes[Channel-1];
			}
			else
			{
				UE_LOG(LogSoundVisualization, Warning, TEXT("Requested channel %d, sound only has %d channels"), SoundWave->NumChannels);
			}
		}
		else
		{
			UE_LOG(LogSoundVisualization, Warning, TEXT("Invalid Channel (%d)"), Channel);
		}
	}
#else
	UE_LOG(LogSoundVisualization, Warning, TEXT("Get Amplitude does not work for cooked builds yet."));
#endif
}

bool USoundVisualizationStaticsTool::GetSoundAmplitudeAndFrequencyCurve(USoundWave* SoundWave, int32 Channel,
	float StartTime, float TimeLength, int32 AmplitudeBuckets, int32 SpectrumWidth, FString Folder)
{
	if (AmplitudeBuckets == 0 || SpectrumWidth == 0 || SoundWave == nullptr)
	{
		return false;
	}

	TArray<float> AmplitudeData;
	TArray<float> FrequencyData;
	
	GetAmplitude(SoundWave, Channel, StartTime, TimeLength, AmplitudeBuckets, AmplitudeData);
	CalculateFrequencySpectrum(SoundWave, Channel, StartTime, TimeLength, SpectrumWidth, FrequencyData);

	// Load (rather than just look up) the AssetRegistry module so the reference cannot be null.
	FAssetRegistryModule& AssetRegistryModule = FModuleManager::Get().LoadModuleChecked<FAssetRegistryModule>("AssetRegistry");
	IAssetRegistry& AssetRegistry = AssetRegistryModule.GetRegistry();

	FString Path = "/Game";
	if (Folder.Len() != 0)
	{
		Path = Path / Folder;
	}

	FPlatformFileManager::Get().GetPlatformFile().CreateDirectoryTree(* FPaths::ConvertRelativePathToFull(Path));

	const FString AmplitudeName = "CF_" + SoundWave->GetName() + "_Amp";
	const FString FrequencyName = "CF_" + SoundWave->GetName() + "_Fre";
	const FString AmpAFreName = "CF_" + SoundWave->GetName() + "_FreAmp";
	const FString AmplitudePackage = Path / AmplitudeName;
	const FString FrequencyPackage = Path / FrequencyName;
	const FString AmpAFrePackage = Path / AmpAFreName;
	
	UPackage* AmpPackage = CreatePackage(*AmplitudePackage);
	UPackage* FrePackage = CreatePackage(*FrequencyPackage);
	UPackage* AmpAFrePack = CreatePackage(*AmpAFrePackage);
	
	UCurveFloatFactory* CurveFloatFactory = NewObject<UCurveFloatFactory>();
	CurveFloatFactory->SupportedClass = UCurveFloat::StaticClass();
	
	auto GeneratedImpl = [&](UPackage* Package, const FString& ResName, const TArray<float>& Data,
		std::function<void(UCurveFloat*, int Index)> Func)
	{
		UCurveFloat* CurveFloat = Cast<UCurveFloat>(CurveFloatFactory->FactoryCreateNew(UCurveFloat::StaticClass(),
			Package, *ResName, RF_Standalone | RF_Public, nullptr, nullptr));
	
		for (int i = 0; i < Data.Num(); ++i)
		{
			//const float Key = i * TimeSplit;
			//CurveFloat->FloatCurve.AddKey(Key, Data[i]);
			Func(CurveFloat, i);
		}

		AssetRegistry.AssetCreated(CurveFloat);
		Package->SetDirtyFlag(true);
		CurveFloat->MarkPackageDirty();
		CurveFloat->PostEditChange();
	
		TArray<UObject*> Assets;
		Assets.Add(CurveFloat);

		FContentBrowserModule& ContentBrowserModule = FModuleManager::Get().LoadModuleChecked<FContentBrowserModule>("ContentBrowser");
		ContentBrowserModule.Get().SyncBrowserToAssets(Assets);
	};
	
	// TimeLength is a duration (see the GetAmplitude/CalculateFrequencySpectrum docs),
	// so the key spacing is simply the window length divided by the number of keys.
	const float TimeSplit_Amp = TimeLength / AmplitudeBuckets;
	const float TimeSplit_Frq = TimeLength / SpectrumWidth;
	
	GeneratedImpl(AmpPackage, AmplitudeName, AmplitudeData, [&](UCurveFloat* CurveFloat, int Index)
	{
		CurveFloat->FloatCurve.AddKey(TimeSplit_Amp * Index, AmplitudeData[Index]);
	});
	GeneratedImpl(FrePackage, FrequencyName, FrequencyData, [&](UCurveFloat* CurveFloat, int Index)
	{
		CurveFloat->FloatCurve.AddKey(TimeSplit_Frq * Index, FrequencyData[Index]);
	});
	GeneratedImpl(AmpAFrePack, AmpAFreName, FrequencyData, [&](UCurveFloat* CurveFloat, int Index)
	{
		if (FrequencyData.IsValidIndex(Index) && AmplitudeData.IsValidIndex(Index))
			CurveFloat->FloatCurve.AddKey(FrequencyData[Index], AmplitudeData[Index]);
	});
	return true;
}


void USoundVisualizationStaticsTool::GetAmplitude(USoundWave* SoundWave, const bool bSplitChannels, const float StartTime, const float TimeLength, const int32 AmplitudeBuckets, TArray< TArray<float> >& OutAmplitudes)
{

	OutAmplitudes.Empty();

#if WITH_EDITORONLY_DATA
	const int32 NumChannels = SoundWave->NumChannels;
	if (AmplitudeBuckets > 0 && NumChannels > 0)
	{
		// Setup the output data
		OutAmplitudes.AddZeroed((bSplitChannels ? NumChannels : 1));
		for (int32 ChannelIndex = 0; ChannelIndex < OutAmplitudes.Num(); ++ChannelIndex)
		{
			OutAmplitudes[ChannelIndex].AddZeroed(AmplitudeBuckets);
		}

		// check if there is any raw sound data
		if( SoundWave->RawData.GetBulkDataSize() > 0 )
		{
			// Lock raw wave data.
			uint8* RawWaveData = ( uint8* )SoundWave->RawData.Lock( LOCK_READ_ONLY );
			int32 RawDataSize = SoundWave->RawData.GetBulkDataSize();
			FWaveModInfo WaveInfo;

			// parse the wave data
			if( WaveInfo.ReadWaveHeader( RawWaveData, RawDataSize, 0 ) )
			{
				uint32 SampleCount = 0;
				uint32 SampleCounts[10] = {0};

				uint32 FirstSample = *WaveInfo.pSamplesPerSec * StartTime;
				uint32 LastSample = *WaveInfo.pSamplesPerSec * (StartTime + TimeLength);

				if (NumChannels <= 2)
				{
					SampleCount = WaveInfo.SampleDataSize / (2 * NumChannels);
				}
				else
				{
					for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
					{
						SampleCounts[ChannelIndex] = (SoundWave->ChannelSizes[ChannelIndex] / 2);
						SampleCount = FMath::Max(SampleCount, SampleCounts[ChannelIndex]);
						SampleCounts[ChannelIndex] -= FirstSample;
					}
				}

				FirstSample = FMath::Min(SampleCount, FirstSample);
				LastSample = FMath::Min(SampleCount, LastSample);

				const int16* SamplePtr = reinterpret_cast<const int16*>(WaveInfo.SampleDataStart);
				if (NumChannels <= 2)
				{
					SamplePtr += FirstSample;
				}

				uint32 SamplesPerAmplitude = (LastSample - FirstSample) / AmplitudeBuckets;
				uint32 ExcessSamples = (LastSample - FirstSample) % AmplitudeBuckets;

				for (int32 AmplitudeIndex = 0; AmplitudeIndex < AmplitudeBuckets; ++AmplitudeIndex)
				{
					if (NumChannels <= 2)
					{
						int64 SampleSum[2] = {0};
						uint32 SamplesToRead = SamplesPerAmplitude + (ExcessSamples-- > 0 ? 1 : 0);
						for (uint32 SampleIndex = 0; SampleIndex < SamplesToRead; ++SampleIndex)
						{
							for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
							{
								SampleSum[ChannelIndex] += FMath::Abs(*SamplePtr);
								SamplePtr++;
							}
						}
						for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
						{
							OutAmplitudes[(bSplitChannels ? ChannelIndex : 0)][AmplitudeIndex] = SampleSum[ChannelIndex] / (float)SamplesToRead;
						}
					}
					else
					{
						uint32 SamplesRead = 0;
						int64 SampleSum = 0;
						uint32 SamplesToRead = SamplesPerAmplitude + (ExcessSamples-- > 0 ? 1 : 0);
						for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
						{
							uint32 SamplesToReadForChannel = FMath::Min(SamplesToRead, SampleCounts[ChannelIndex]);

							if (SamplesToReadForChannel > 0)
							{
								if (bSplitChannels)
								{
									SampleSum = 0;
								}

								for (uint32 SampleIndex = 0; SampleIndex < SamplesToReadForChannel; ++SampleIndex)
								{
									SampleSum += FMath::Abs(*(SamplePtr + FirstSample + SampleIndex + SoundWave->ChannelOffsets[ChannelIndex] / 2));
								}

								if (bSplitChannels)
								{
									OutAmplitudes[ChannelIndex][AmplitudeIndex] = SampleSum / (float)SamplesToReadForChannel;
								}
								SamplesRead += SamplesToReadForChannel;
								SampleCounts[ChannelIndex] -= SamplesToReadForChannel;
							}
						}

						if (!bSplitChannels)
						{
							OutAmplitudes[0][AmplitudeIndex] = SampleSum / (float)SamplesRead;
						}

						FirstSample += SamplesToRead;
					}
				}
			}

			SoundWave->RawData.Unlock();
		}
	}
#else
	UE_LOG(LogSoundVisualization, Warning, TEXT("Get Amplitude does not work for cooked builds yet."));
#endif
}

void USoundVisualizationStaticsTool::CalculateFrequencySpectrum(USoundWave* SoundWave, int32 Channel, float StartTime, float TimeLength, int32 SpectrumWidth, TArray<float>& OutSpectrum)
{
	OutSpectrum.Empty();

#if WITH_EDITORONLY_DATA
	if (SoundWave)
	{
		if (SpectrumWidth <= 0)
		{
			UE_LOG(LogSoundVisualization, Warning, TEXT("Invalid SpectrumWidth (%d)"), SpectrumWidth);
		}
		else if (Channel < 0)
		{
			UE_LOG(LogSoundVisualization, Warning, TEXT("Invalid Channel (%d)"), Channel);
		}
		else
		{
			TArray< TArray<float> > Spectrums;

			CalculateFrequencySpectrum(SoundWave, (Channel != 0), StartTime, TimeLength, SpectrumWidth, Spectrums);

			if(Channel == 0)
			{
				OutSpectrum = Spectrums[0];
			}
			else if (Channel <= Spectrums.Num())
			{
				OutSpectrum = Spectrums[Channel-1];
			}
			else
			{
				UE_LOG(LogSoundVisualization, Warning, TEXT("Requested channel %d, sound only has %d channels"), SoundWave->NumChannels);
			}
		}
	}
#else	
	UE_LOG(LogSoundVisualization, Warning, TEXT("Calculate Frequency Spectrum does not work for cooked builds yet."));
#endif
}

float GetFFTInValue(const int16 SampleValue, const int16 SampleIndex, const int16 SampleCount)
{
	float FFTValue = SampleValue;

	// Apply the Hann window
	FFTValue *= 0.5f * (1 - FMath::Cos(2 * PI * SampleIndex / (SampleCount - 1)));

	return FFTValue;
}

void USoundVisualizationStaticsTool::CalculateFrequencySpectrum(USoundWave* SoundWave, const bool bSplitChannels, const float StartTime, const float TimeLength, const int32 SpectrumWidth, TArray< TArray<float> >& OutSpectrums)
{

	OutSpectrums.Empty();

#if WITH_EDITORONLY_DATA
	const int32 NumChannels = SoundWave->NumChannels;
	if (SpectrumWidth > 0 && NumChannels > 0)
	{
		// Setup the output data
		OutSpectrums.AddZeroed((bSplitChannels ? NumChannels : 1));
		for (int32 ChannelIndex = 0; ChannelIndex < OutSpectrums.Num(); ++ChannelIndex)
		{
			OutSpectrums[ChannelIndex].AddZeroed(SpectrumWidth);
		}

		// check if there is any raw sound data
		if( SoundWave->RawData.GetBulkDataSize() > 0 )
		{
			// Lock raw wave data.
			uint8* RawWaveData = ( uint8* )SoundWave->RawData.Lock( LOCK_READ_ONLY );
			int32 RawDataSize = SoundWave->RawData.GetBulkDataSize();
			FWaveModInfo WaveInfo;

			// parse the wave data
			if( WaveInfo.ReadWaveHeader( RawWaveData, RawDataSize, 0 ) )
			{
				int32 SampleCount = 0;
				int32 SampleCounts[10] = {0};

				int32 FirstSample = *WaveInfo.pSamplesPerSec * StartTime;
				int32 LastSample = *WaveInfo.pSamplesPerSec * (StartTime + TimeLength);

				if (NumChannels <= 2)
				{
					SampleCount = WaveInfo.SampleDataSize / (2 * NumChannels);
				}
				else
				{
					for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
					{
						SampleCounts[ChannelIndex] = (SoundWave->ChannelSizes[ChannelIndex] / 2);
						SampleCount = FMath::Max(SampleCount, SampleCounts[ChannelIndex]);
						SampleCounts[ChannelIndex] -= FirstSample;
					}
				}

				FirstSample = FMath::Min(SampleCount, FirstSample);
				LastSample = FMath::Min(SampleCount, LastSample);

				int32 SamplesToRead = LastSample - FirstSample;

				if (SamplesToRead > 0)
				{
					// Shift the window enough so that we get a power of 2
					int32 PoT = 2;
					while (SamplesToRead > PoT) PoT *= 2;
					FirstSample = FMath::Max(0, FirstSample - (PoT - SamplesToRead) / 2);
					SamplesToRead = PoT;
					LastSample = FirstSample + SamplesToRead;
					if (LastSample > SampleCount)
					{
						FirstSample = LastSample - SamplesToRead;
					}
					if (FirstSample < 0)
					{
						// If we get to this point we can't create a reasonable window so just give up
						SoundWave->RawData.Unlock();
						return;
					}

					kiss_fft_cpx* buf[10] = { 0 }; 
					kiss_fft_cpx* out[10] = { 0 };

					int32 Dims[1] = { SamplesToRead };
					kiss_fftnd_cfg stf = kiss_fftnd_alloc(Dims, 1, 0, NULL, NULL);


					const int16* SamplePtr = reinterpret_cast<const int16*>(WaveInfo.SampleDataStart);
					if (NumChannels <= 2)
					{
						for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
						{
							buf[ChannelIndex] = (kiss_fft_cpx *)KISS_FFT_MALLOC(sizeof(kiss_fft_cpx) * SamplesToRead);
							out[ChannelIndex] = (kiss_fft_cpx *)KISS_FFT_MALLOC(sizeof(kiss_fft_cpx) * SamplesToRead);
						}

						SamplePtr += (FirstSample * NumChannels);

						for (int32 SampleIndex = 0; SampleIndex < SamplesToRead; ++SampleIndex)
						{
							for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
							{
								buf[ChannelIndex][SampleIndex].r = GetFFTInValue(*SamplePtr, SampleIndex, SamplesToRead);
								buf[ChannelIndex][SampleIndex].i = 0.f;

								SamplePtr++;
							}
						}
					}
					else
					{
						for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
						{
							// Drop this channel out if there isn't the power of 2 number of samples available
							if (SampleCounts[ChannelIndex] >= SamplesToRead)
							{
								buf[ChannelIndex] = (kiss_fft_cpx *)KISS_FFT_MALLOC(sizeof(kiss_fft_cpx) * SamplesToRead);
								out[ChannelIndex] = (kiss_fft_cpx *)KISS_FFT_MALLOC(sizeof(kiss_fft_cpx) * SamplesToRead);

								for (int32 SampleIndex = 0; SampleIndex < SamplesToRead; ++SampleIndex)
								{
									buf[ChannelIndex][SampleIndex].r = GetFFTInValue(*(SamplePtr + FirstSample + SampleIndex + SoundWave->ChannelOffsets[ChannelIndex] / 2), SampleIndex, SamplesToRead);
									buf[ChannelIndex][SampleIndex].i = 0.f;
								}
							}
						}
					}

					for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
					{
						if (buf[ChannelIndex])
						{
							kiss_fftnd(stf, buf[ChannelIndex], out[ChannelIndex]);
						}
					}

					int32 SamplesPerSpectrum = SamplesToRead / (2 * SpectrumWidth);
					int32 ExcessSamples = SamplesToRead % (2 * SpectrumWidth);

					int32 FirstSampleForSpectrum = 1;
					for (int32 SpectrumIndex = 0; SpectrumIndex < SpectrumWidth; ++SpectrumIndex)
					{
						static bool doLog = false;

						int32 SamplesRead = 0;
						double SampleSum = 0;
						int32 SamplesForSpectrum = SamplesPerSpectrum + (ExcessSamples-- > 0 ? 1 : 0);
						if (doLog) UE_LOG(LogSoundVisualization, Log, TEXT("----"));
						for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
						{
							if (out[ChannelIndex])
							{
								if (bSplitChannels)
								{
									SampleSum = 0;
								}

								for (int32 SampleIndex = 0; SampleIndex < SamplesForSpectrum; ++SampleIndex)
								{
									float PostScaledR = out[ChannelIndex][FirstSampleForSpectrum + SampleIndex].r * 2.f / SamplesToRead;
									float PostScaledI = out[ChannelIndex][FirstSampleForSpectrum + SampleIndex].i * 2.f / SamplesToRead;
									//float Val = FMath::Sqrt(FMath::Square(PostScaledR) + FMath::Square(PostScaledI));
									float Val = 10.f * FMath::LogX(10.f, FMath::Square(PostScaledR) + FMath::Square(PostScaledI));
									if (doLog) UE_LOG(LogSoundVisualization, Log, TEXT("%.2f"), Val);
									SampleSum += Val;
								}

								if (bSplitChannels)
								{
									OutSpectrums[ChannelIndex][SpectrumIndex] = (float)(SampleSum / SamplesForSpectrum);
								}
								SamplesRead += SamplesForSpectrum;
							}
						}

						if (!bSplitChannels)
						{
							OutSpectrums[0][SpectrumIndex] = (float)(SampleSum / SamplesRead);
						}

						FirstSampleForSpectrum += SamplesForSpectrum;
					}

					KISS_FFT_FREE(stf);
					for (int32 ChannelIndex = 0; ChannelIndex < NumChannels; ++ChannelIndex)
					{
						if (buf[ChannelIndex])
						{
							KISS_FFT_FREE(buf[ChannelIndex]);
							KISS_FFT_FREE(out[ChannelIndex]);
						}
					}
				}
			}

			SoundWave->RawData.Unlock();
		}
	}
#else
	UE_LOG(LogSoundVisualization, Warning, TEXT("Calculate Frequency Spectrum does not work for cooked builds yet."));
#endif
}
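
As a side note on the empty-curve issue mentioned near the top: GetSoundAmplitudeAndFrequencyCurve only marks the packages dirty and syncs the Content Browser; it never saves them to disk. Below is a minimal sketch of saving one of the generated packages explicitly. It assumes an editor build and the UE4-era UPackage::SavePackage overload, and is not part of the original tool.

// Hedged sketch, not part of the original tool: persist a generated curve package to disk.
#include "Misc/PackageName.h"
#include "UObject/Package.h"

static bool SaveCurvePackage(UPackage* Package)
{
	// Convert the long package name (e.g. /Game/SoundData/CF_..._Amp) into a .uasset file path.
	const FString FileName = FPackageName::LongPackageNameToFilename(
		Package->GetName(), FPackageName::GetAssetPackageExtension());

	// With a null Base, SavePackage writes every RF_Public | RF_Standalone object in the
	// package, i.e. the curve created by GeneratedImpl above.
	return UPackage::SavePackage(Package, nullptr, RF_Public | RF_Standalone, *FileName);
}

Calling this on AmpPackage, FrePackage and AmpAFrePack just before GetSoundAmplitudeAndFrequencyCurve returns would persist the curves without a manual Save All.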



That is my personal summary; apologies for any shortcomings, and corrections are welcome.
