from transformers import pipeline


class SpeechRecognizer:
    """Speech-to-text helper wrapping a Hugging Face ASR pipeline around
    an OpenAI Whisper checkpoint (``openai/whisper-<size>``)."""

    def __init__(self, model_size="tiny", device=-1):
        """Load a Whisper automatic-speech-recognition pipeline.

        Args:
            model_size: Whisper checkpoint size suffix, e.g. "tiny",
                "base", "small", "medium", "large". Defaults to "tiny".
            device: Device index passed to the pipeline; -1 selects CPU
                (the previously hard-coded value, kept as the default
                for backward compatibility).
        """
        print(f"Loading Whisper {model_size} model...")
        # pipeline() downloads/caches the checkpoint on first use.
        self.model = pipeline(
            "automatic-speech-recognition",
            model=f"openai/whisper-{model_size}",
            device=device,
        )
        print("Pipeline created successfully")

    def transcribe_audio(self, audio_path, language="english"):
        """Transcribe an audio file and return the recognized text.

        Args:
            audio_path: Path to an audio file readable by the pipeline.
            language: Language hint forwarded to Whisper's generation via
                ``generate_kwargs``. Defaults to "english" (the previously
                hard-coded value), so existing callers are unaffected.

        Returns:
            The transcription string (the ``"text"`` field of the
            pipeline's result dict).
        """
        result = self.model(
            audio_path,
            generate_kwargs={"language": language},
        )
        return result["text"]