| """ |
| Copyright (c) Meta Platforms, Inc. and affiliates. |
| All rights reserved. |
| |
| This source code is licensed under the license found in the |
| LICENSE file in the root directory of this source tree. |
| """ |
|
|
import os
import subprocess
import tempfile
from tempfile import NamedTemporaryFile

import gradio as gr
import torch
from scipy.io.wavfile import write

from audiocraft.data.audio import audio_write
from audiocraft.models import MusicGen
|
|
|
|
| MODEL = None |
|
|
| import yt_dlp as youtube_dl |
| from moviepy.editor import VideoFileClip |
|
|
| YT_LENGTH_LIMIT_S = 480 |
|
|
def _format_hms(seconds):
    """Render a duration in seconds as H:MM:SS for user-facing messages."""
    hours, rem = divmod(int(seconds), 3600)
    minutes, secs = divmod(rem, 60)
    return f"{hours}:{minutes:02d}:{secs:02d}"


def download_yt_audio(yt_url, filename):
    """Download a YouTube video's media to *filename*.

    Validates the video's length against YT_LENGTH_LIMIT_S before
    downloading anything.

    Raises:
        gr.Error: shown in the Gradio UI when the URL cannot be resolved,
            the video is too long, or the download fails.
    """
    info_loader = youtube_dl.YoutubeDL()

    try:
        # Metadata-only pass so we can check the duration before downloading.
        info = info_loader.extract_info(yt_url, download=False)
    except youtube_dl.utils.DownloadError as err:
        raise gr.Error(str(err))

    # yt-dlp exposes the duration directly in seconds; the previous code
    # re-parsed the human-readable "duration_string" and then formatted the
    # limit with time.strftime — but `time` was never imported, so the
    # over-length path always crashed with a NameError.
    file_length_s = info.get("duration")
    if file_length_s is None:
        raise gr.Error("Could not determine the YouTube video's duration.")

    if file_length_s > YT_LENGTH_LIMIT_S:
        raise gr.Error(
            f"Maximum YouTube length is {_format_hms(YT_LENGTH_LIMIT_S)}, "
            f"got {_format_hms(file_length_s)} YouTube video."
        )

    ydl_opts = {"outtmpl": filename, "format": "worstvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best"}

    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        try:
            ydl.download([yt_url])
        except youtube_dl.utils.DownloadError as err:
            # download() wraps failures (extractor errors included) in
            # DownloadError; catching only ExtractorError, as before, let
            # ordinary download failures escape as unhandled exceptions.
            raise gr.Error(str(err))
|
|
|
|
def convert_to_mp3(input_path, output_path):
    """Extract the audio track of *input_path* and write it to *output_path* as MP3.

    Best-effort: any failure is printed and swallowed (preserving the
    original behaviour) so the caller can proceed with whatever exists.
    """
    video_clip = None
    try:
        video_clip = VideoFileClip(input_path)
        audio_clip = video_clip.audio
        print("Converting to MP3...")
        audio_clip.write_audiofile(output_path)
    except Exception as e:
        print("Error:", e)
    finally:
        # The original never closed the clip, leaking the underlying ffmpeg
        # reader / file handle on every call.
        if video_clip is not None:
            video_clip.close()
|
|
def load_youtube_audio(yt_link):
    """Download a YouTube link and return the path of its audio as an MP3.

    The intermediate video lives in a throwaway temporary directory. The MP3
    is written to a unique temp-file path rather than the previous fixed
    "video_sound.mp3" in the working directory, which collided (and was
    silently overwritten) when two sessions ran concurrently.
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        filepath = os.path.join(tmpdirname, "video.mp4")
        download_yt_audio(yt_link, filepath)

        # delete=False: gradio reads the file after we return its path.
        with NamedTemporaryFile(suffix=".mp3", delete=False) as tmp_mp3:
            mp3_output_path = tmp_mp3.name
        # Convert while the source video still exists (tempdir is about to go).
        convert_to_mp3(filepath, mp3_output_path)

    print("Conversion complete. MP3 saved at:", mp3_output_path)
    return mp3_output_path
|
|
def split_process(audio, chosen_out_track):
    """Split *audio* into stems with Demucs and return the chosen stem's path.

    Args:
        audio: (sample_rate, np.ndarray) tuple as produced by
            gr.Audio(type="numpy").
        chosen_out_track: one of "vocals", "bass", "drums", "other", "all-in".

    Returns:
        Path of the selected stem, "test.wav" (the unsplit input) for
        "all-in", or None for an unrecognized track name — matching the
        original elif chain's implicit fall-through.
    """
    os.makedirs("out", exist_ok=True)
    write('test.wav', audio[0], audio[1])
    # Argument list + no shell replaces os.system's shell string, removing the
    # injection surface. check=False keeps os.system's semantics: a failing
    # separation is not raised here.
    subprocess.run(
        ["python3", "-m", "demucs.separate", "-n", "mdx_extra_q", "-j", "4", "test.wav", "-o", "out"],
        check=False,
    )

    stem_paths = {
        "vocals": "./out/mdx_extra_q/test/vocals.wav",
        "bass": "./out/mdx_extra_q/test/bass.wav",
        "drums": "./out/mdx_extra_q/test/drums.wav",
        "other": "./out/mdx_extra_q/test/other.wav",
        "all-in": "test.wav",
    }
    return stem_paths.get(chosen_out_track)
| |
def load_model(version):
    """Fetch and return the pretrained MusicGen checkpoint named *version*."""
    print("Loading model", version)
    model = MusicGen.get_pretrained(version)
    return model
|
|
|
|
def predict(music_prompt, melody, duration, cfg_coef):
    """Generate music from a text prompt, optionally conditioned on a melody.

    Args:
        music_prompt: text description of the music to generate.
        melody: optional (sample_rate, np.ndarray) tuple from gr.Audio used
            as the chroma/melody condition; falsy disables conditioning.
        duration: requested length in seconds (capped by the model config).
        cfg_coef: classifier-free guidance coefficient.

    Returns:
        Path of a temporary WAV file holding the generated audio.

    Raises:
        gr.Error: if *duration* exceeds the model's segment duration.
    """
    text = music_prompt
    global MODEL
    # Lazily (re)load the melody-conditioned checkpoint on first use.
    # (The original also computed an unused local `topk = int(250)` here;
    # the effective value is the top_k literal below.)
    if MODEL is None or MODEL.name != "melody":
        MODEL = load_model("melody")

    if duration > MODEL.lm.cfg.dataset.segment_duration:
        raise gr.Error("MusicGen currently supports durations of up to 30 seconds!")
    MODEL.set_generation_params(
        use_sampling=True,
        top_k=250,  # fixed sampling parameter; not exposed in the UI
        top_p=0,
        temperature=1.0,
        cfg_coef=cfg_coef,
        duration=duration,
    )

    if melody:
        # gr.Audio(type="numpy") yields (sample_rate, samples); build a
        # float tensor on the model's device, transposed to channels-first
        # and given a leading batch dim.
        sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0)
        print(melody.shape)
        if melody.dim() == 2:
            melody = melody[None]
        # Trim the conditioning signal to the model's maximum segment length.
        melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)]
        output = MODEL.generate_with_chroma(
            descriptions=[text],
            melody_wavs=melody,
            melody_sample_rate=sr,
            progress=False
        )
    else:
        output = MODEL.generate(descriptions=[text], progress=False)

    output = output.detach().cpu().float()[0]
    # delete=False: gradio must read the file after this function returns.
    with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
        audio_write(file.name, output, MODEL.sample_rate, strategy="loudness", add_suffix=False)

    return file.name
|
|
# Inline CSS injected into gr.Blocks: centers the main column and styles links.
css="""
#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""
|
|
# ---------------------------------------------------------------------------
# Gradio UI: upload a song, split it into stems with Demucs, then use one
# stem as the melody condition for MusicGen.
# ---------------------------------------------------------------------------
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        # Intro banner / usage instructions.
        gr.Markdown(
            """
            # Split Audio Tracks to MusicGen
            Upload an audio file, split audio tracks with Demucs, choose a track as conditional sound for MusicGen, get a remix ! <br/>
            *** Careful, MusicGen model loaded here can only handle up to 30 second audio, please use the audio component gradio feature to edit your audio before conditioning ***
            <br/>
            <br/>
            [](https://huggingface.co/spaces/fffiloni/SplitTrack2MusicGen?duplicate=true) for longer audio, more control and no queue.</p>
            """
        )

        with gr.Column():
            # Step 1: provide a source sound. The YouTube loader controls are
            # present but disabled (interactive=False on both widgets).
            uploaded_sound = gr.Audio(type="numpy", label="Input", sources="upload")
            with gr.Row():
                youtube_link = gr.Textbox(show_label=False, placeholder="TEMPORARILY DISABLED • you can also paste YT link and load it", interactive=False)
                yt_load_btn = gr.Button("Load YT song", interactive=False)

            # Step 2: choose which Demucs stem becomes the melody condition.
            with gr.Row():
                chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", info="Which track from your audio do you want to mashup ?", value="vocals")
                load_sound_btn = gr.Button('Load your chosen track')

        # Step 3: describe the music and tune generation parameters.
        with gr.Row():
            music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True, placeholder="lofi slow bpm electro chill with organic samples")
            melody = gr.Audio(sources="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
        with gr.Row():
            duration = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Generated Music Duration", interactive=True)
            cfg_coef = gr.Slider(label="Classifier Free Guidance", minimum=1.0, maximum=10.0, step=0.1, value=3.0, interactive=True)
        with gr.Row():
            submit = gr.Button("Submit")

        output = gr.Audio(label="Generated Music")

    # Prompt-only examples (no melody conditioning: melody input is None).
    gr.Examples(
        fn=predict,
        examples=[
            [
                "An 80s driving pop song with heavy drums and synth pads in the background",
                None,
                10,
                3.0
            ],
            [
                "A cheerful country song with acoustic guitars",
                None,
                10,
                3.0
            ],
            [
                "90s rock song with electric guitar and heavy drums",
                None,
                10,
                3.0
            ],
            [
                "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
                None,
                10,
                3.0
            ],
            [
                "lofi slow bpm electro chill with organic samples",
                None,
                10,
                3.0
            ],
        ],
        inputs=[music_prompt, melody, duration, cfg_coef],
        outputs=[output]
    )
    # Event wiring: (disabled) YT loader, stem splitter, and generation.
    yt_load_btn.click(fn=load_youtube_audio, inputs=[youtube_link], outputs=[uploaded_sound], queue=False, api_name=False)
    load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody], api_name="splt_trck")
    submit.click(predict, inputs=[music_prompt, melody, duration, cfg_coef], outputs=[output])

demo.queue(max_size=32).launch()
|
|