Question: Stream Audio and Lip sync system #127
-
Quick question: It seems that the "Stream Audio Source" script isn't compatible with any lip sync system that reads data from an AudioSource (like https://github.com/gerardllorach/threelipsync or https://assetstore.unity.com/packages/tools/animation/salsa-lipsync-suite-148442). Do you have any suggestions for converting the streamed audio into something that can be fed into an AudioSource?
-
Hi @ariok, thanks for using my package. I am actually using Salsa for one of my client projects, so I know it is possible. I ended up making a few edits to the audio stream class I have in my audio utils package. To use it, just replace the StreamAudioSource with SalsaAudioStream:
```csharp
using CrazyMinnow.SALSA;
using System.Collections.Concurrent;
using System.Threading.Tasks;
using UnityEngine;
using Utilities.Extensions;

namespace Elevenlabs.Extensions
{
    [RequireComponent(typeof(Salsa))]
    [RequireComponent(typeof(AudioSource))]
    public class SalsaAudioStream : MonoBehaviour
    {
        [SerializeField]
        private AudioSource audioSource;

        private readonly ConcurrentQueue<float> audioBuffer = new();

        public bool IsEmpty => audioBuffer.IsEmpty;

        [SerializeField]
        private Salsa salsaInstance;

        private float[] analysisBuffer;

        private void OnValidate()
        {
            if (audioSource == null)
            {
                audioSource = GetComponent<AudioSource>();
            }

            audioSource.Validate();

            if (salsaInstance == null)
            {
                salsaInstance = GetComponent<Salsa>();
            }
        }

        private void Awake()
        {
            OnValidate();

            if (!Application.isPlaying) { return; }

            salsaInstance.Validate();
            salsaInstance.getExternalAnalysis = GetAnalysisValueLeveragingSalsaAnalyzer;
        }

        private void OnAudioFilterRead(float[] data, int channels)
        {
            if (analysisBuffer == null || analysisBuffer.Length != data.Length)
            {
                analysisBuffer = new float[data.Length];
            }

            if (audioBuffer.Count < data.Length)
            {
                for (var i = 0; i < data.Length; i++)
                {
                    analysisBuffer[i] = 0f;
                }

                return;
            }

            for (var i = 0; i < data.Length; i += channels)
            {
                if (audioBuffer.TryDequeue(out var sample))
                {
                    for (var j = 0; j < channels; j++)
                    {
                        data[i + j] = sample;
                    }
                }

                // copy the audio data to the analysis buffer for salsa
                for (var j = 0; j < channels; j++)
                {
                    analysisBuffer[i + j] = data[i + j];
                }
            }
        }

        public async Task BufferCallbackAsync(float[] samples)
        {
            foreach (var sample in samples)
            {
                audioBuffer.Enqueue(sample);
            }

            await Task.Yield();
        }

        private float GetAnalysisValueLeveragingSalsaAnalyzer()
        {
            // If you need more control over the analysis, process the buffer
            // here and then return the analysis. Since only the first channel of
            // audio data is stored in the 'analysisBuffer' (in this example), the
            // 'interleave' value is initialized as '1' -- we've already
            // separated the data in the callback, so we want to analyze all of it.
            return salsaInstance.audioAnalyzer(1, analysisBuffer);
        }
    }
}
```
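For reference, here is a minimal sketch of how streamed PCM chunks could be forwarded into this component. The StreamingLipSyncFeeder class and its OnSamplesReceivedAsync method are hypothetical placeholders for whatever callback your streaming source exposes; only SalsaAudioStream.BufferCallbackAsync comes from the class above.

```csharp
using System.Threading.Tasks;
using UnityEngine;

namespace Elevenlabs.Extensions
{
    // Hypothetical glue component: forwards decoded PCM float chunks from any
    // streaming source (e.g. a text-to-speech stream callback) into the
    // SalsaAudioStream buffer so they are played back and analyzed by SALSA.
    [RequireComponent(typeof(SalsaAudioStream))]
    public class StreamingLipSyncFeeder : MonoBehaviour
    {
        [SerializeField]
        private SalsaAudioStream salsaAudioStream;

        private void Awake()
        {
            if (salsaAudioStream == null)
            {
                salsaAudioStream = GetComponent<SalsaAudioStream>();
            }
        }

        // Call this from your streaming callback with each chunk of
        // decoded samples (floats in the -1..1 range).
        public async Task OnSamplesReceivedAsync(float[] samples)
        {
            await salsaAudioStream.BufferCallbackAsync(samples);
        }
    }
}
```

From there the samples sit in the ConcurrentQueue, OnAudioFilterRead plays them through the attached AudioSource, and SALSA pulls its analysis values from the same data via the getExternalAnalysis delegate.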