using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;
using Microsoft.Extensions.Logging;

namespace AzureAi.Transcriber.Services;

public interface ITranscribeService
{
    Task<SpeechRecognitionResult> Transcribe(string filePath);
}

public class TranscribeService : ITranscribeService
{
    private static readonly string _speechKey =
        Environment.GetEnvironmentVariable("SPEECH_KEY") ?? string.Empty;
    private static readonly string _speechRegion =
        Environment.GetEnvironmentVariable("SPEECH_REGION") ?? string.Empty;

    private readonly ILogger<TranscribeService> _logger;

    public TranscribeService(ILogger<TranscribeService> logger)
    {
        _logger = logger;

        // Fail fast if the Speech resource credentials were not supplied.
        if (string.IsNullOrWhiteSpace(_speechKey) || string.IsNullOrWhiteSpace(_speechRegion))
        {
            throw new InvalidOperationException(
                "Speech key and region must be set in the SPEECH_KEY and SPEECH_REGION environment variables.");
        }
    }

    public async Task<SpeechRecognitionResult> Transcribe(string filePath)
    {
        _logger.LogInformation("Transcribing {FilePath}", filePath);

        var speechConfig = SpeechConfig.FromSubscription(_speechKey, _speechRegion);
        speechConfig.SpeechRecognitionLanguage = "en-US";

        using var audioConfig = AudioConfig.FromWavFileInput(filePath);
        using var recognizer = new SpeechRecognizer(speechConfig, audioConfig);

        // RecognizeOnceAsync returns after the first recognized utterance,
        // so it suits short clips rather than long recordings.
        var result = await recognizer.RecognizeOnceAsync();
        return result;
    }
}
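
// A minimal usage sketch, kept as a comment so this file still compiles on its own.
// It is an assumption, not part of the original service: it registers the service with
// the standard .NET generic host and resolves it from DI. Host.CreateApplicationBuilder,
// AddSingleton, and GetRequiredService are standard Microsoft.Extensions APIs; the
// "meeting.wav" path is a hypothetical example.
//
//   using Microsoft.Extensions.DependencyInjection;
//   using Microsoft.Extensions.Hosting;
//
//   var builder = Host.CreateApplicationBuilder(args);
//   builder.Services.AddSingleton<ITranscribeService, TranscribeService>();
//   using var host = builder.Build();
//
//   var transcriber = host.Services.GetRequiredService<ITranscribeService>();
//   var result = await transcriber.Transcribe("meeting.wav");
//   Console.WriteLine(result.Reason == ResultReason.RecognizedSpeech
//       ? result.Text
//       : $"Recognition failed: {result.Reason}");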