import os

import librosa
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration

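# Run on the GPU when one is available; otherwise fall back to the CPU.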
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
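# Load the processor and model. HF_TOKEN is read from the environment and
# passed to from_pretrained() in case the checkpoint is gated; clearing
# forced_decoder_ids lets task/language be supplied to generate() instead.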
token = os.getenv("HF_TOKEN")
processor = WhisperProcessor.from_pretrained("jiviai/audioX-south-v1", token=token)
model = WhisperForConditionalGeneration.from_pretrained("jiviai/audioX-south-v1", token=token).to(device)
model.config.forced_decoder_ids = None
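# Load the audio at its native rate, then resample to 16 kHz, the sampling
# rate Whisper feature extractors expect.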
audio_path = "sample.wav"
audio_np, sr = librosa.load(audio_path, sr=None)
if sr != 16000:
    audio_np = librosa.resample(audio_np, orig_sr=sr, target_sr=16000)
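# Convert the waveform to log-Mel input features and move them to the device.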
input_features = processor(audio_np, sampling_rate=16000, return_tensors="pt").input_features.to(device)
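# Generate token IDs, forcing transcription (not translation) in Tamil ("ta").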
predicted_ids = model.generate(input_features, task="transcribe", language="ta")
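# Decode the predicted IDs back to text, dropping Whisper's special tokens.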
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
print(transcription)
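
# Optional: a minimal batched variant (a sketch, not part of the original
# snippet). The processor accepts a list of waveforms and pads each one to
# Whisper's fixed 30-second window, and batch_decode then returns one
# transcription per clip. The file names below are hypothetical placeholders.
paths = ["clip1.wav", "clip2.wav"]
waveforms = [librosa.load(p, sr=16000)[0] for p in paths]
batch = processor(waveforms, sampling_rate=16000, return_tensors="pt").input_features.to(device)
batch_ids = model.generate(batch, task="transcribe", language="ta")
for text in processor.batch_decode(batch_ids, skip_special_tokens=True):
    print(text)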