| import pandas as pd |
| from process_from_parquet import read_parquet_file, process_parquet_df, save_to_csv |
| from process_audio import process_audio_column |
|
|
|
|
def process_partition(partition, process_row_with_params):
    """Row-process a dataframe partition and normalize the result.

    Applies *process_row_with_params* to each row (each call returns a
    metadata dict), then converts the resulting series of dicts into a
    DataFrame with the canonical column order. Keys missing from the
    dicts come back as NaN columns.
    """
    columns = ["path", "url", "type", "duration", "language", "transcript", "tag", "split", "license", "audio"]
    processed = partition.apply(process_row_with_params, axis=1)
    return pd.DataFrame(processed.tolist(), columns=columns)
|
|
| def _get_split(parquet_file): |
| if "train" in parquet_file: |
| return "train" |
| elif "test" in parquet_file: |
| return "test" |
| elif "dev" in parquet_file: |
| return "validation" |
| else: |
| return "train" |
|
|
def process_row(row, parquet_file_name):
    """Build the metadata dict for one dataframe row.

    Reads the row's "audio" and "text" fields and fills in the fixed
    dataset attributes (type, language, tag, license) plus the source
    URL and split derived from *parquet_file_name*.

    NOTE(review): "path" and "duration" are not set here even though the
    downstream column list expects them — presumably filled in by the
    audio-processing step; verify against process_audio_column.
    """
    return {
        "audio": row["audio"],
        "url": f"https://huggingface.co/datasets/meetween/mumospee_librispeech/resolve/main/librispeech-parquet/{parquet_file_name}",
        "transcript": row["text"],
        "type": "audio",
        "language": "en",
        "tag": "Librispeech",
        "split": _get_split(parquet_file_name),
        "license": "CC-BY-4.0",
    }
|
|
def main(config):
    """Run the parquet-to-CSV conversion pipeline.

    Reads the parquet file named by config["parquet_file_path"]
    (limited to config["top"] rows), processes each row into metadata,
    extracts/handles the audio column, and writes the final CSV to
    config["final_path"].
    """
    raw_df, parquet_name = read_parquet_file(config["parquet_file_path"], top=config["top"])

    # Per-row metadata extraction, partitioned for processing.
    metadata_df = process_parquet_df(
        parquet_df=raw_df,
        file_name=parquet_name,
        process_row_func=process_row,
        process_partition=process_partition,
    )

    # Materialize/transform the audio column before export.
    final_df = process_audio_column(metadata_df)
    save_to_csv(final_df, final_path=config["final_path"])
|
|
|
|
|
|