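"""Convert MTG-Jamendo TSV split files into JSONL manifests with audio metadata.

For each split (train/val/test) of a tagging task, the script reads the split
TSV, probes the corresponding `.low.flac` audio files with pydub, and writes
one JSON object per track (audio path, tags, duration, sample rate, sample
count, bit depth, channels).
"""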
import os
import json
from functools import partial
from multiprocessing import Pool, cpu_count

from pydub import AudioSegment
from tqdm import tqdm


def load_tsv(file_path):
    """Parse an MTG-Jamendo split TSV into a list of track dictionaries."""
    with open(file_path, 'r') as file:
        lines = file.readlines()
    lines = lines[1:]  # skip the header row
    output = []
    for line in lines:
        splitted = line.strip().split('\t')
        output.append({
            "TRACK_ID": splitted[0],
            "ARTIST_ID": splitted[1],
            "ALBUM_ID": splitted[2],
            "PATH": splitted[3],
            "DURATION": float(splitted[4]),
            "TAGS": splitted[5:],  # remaining columns are the tag labels
        })
    return output


def write_jsonl(data, output_file):
    """Write a list of dictionaries to a file, one JSON object per line."""
    with open(output_file, 'w') as file:
        for item in data:
            json.dump(item, file)
            file.write('\n')


def get_audio_info(audio_path):
    """
    Extract duration (seconds), sample_rate (Hz), num_samples (int),
    bit_depth (bits), and channels (int) from an audio file.

    Note: pydub decodes FLAC/MP3 through ffmpeg, so ffmpeg must be
    available on the system PATH.
    """
    audio = AudioSegment.from_file(audio_path)
    duration = audio.duration_seconds
    sample_rate = audio.frame_rate
    num_samples = int(audio.frame_count())
    sample_width = audio.sample_width  # bytes per sample
    bit_depth = sample_width * 8
    channels = audio.channels
    return duration, sample_rate, num_samples, bit_depth, channels


def process_item(tsv_dict, prefix):
    """Build a single manifest entry from one TSV row, or return None on failure."""
    # The dataset's MP3 paths are remapped to the corresponding .low.flac files.
    path = os.path.join(prefix, tsv_dict["PATH"]).replace(".mp3", ".low.flac")
    try:
        duration, sr, num_samples, bit_depth, channels = get_audio_info(path)
        return {
            "audio_path": path,
            "label": tsv_dict["TAGS"],
            "duration": duration,
            "sample_rate": sr,
            "num_samples": num_samples,
            "bit_depth": bit_depth,
            "channels": channels,
        }
    except Exception as e:
        print(f"Error reading {path}: {e}")
        return None


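# Each entry returned by process_item (and written to the JSONL manifests
# below) has the following shape; the values here are illustrative only:
#
#   {"audio_path": "data/MTG/audio/00/1000.low.flac",
#    "label": ["genre---rock", "instrument---guitar"],
#    "duration": 212.4, "sample_rate": 44100, "num_samples": 9366840,
#    "bit_depth": 16, "channels": 2}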
def convert_mtg_to_jsonl(output_dir, task='MTGTop50'):
    """Convert the train/val/test splits of one MTG tagging task to JSONL."""
    task2tsv = {
        "MTGTop50": "top50tags",
        "MTGGenre": "genre",
        "MTGInstrument": "instrument",
        "MTGMood": "moodtheme",
    }
    tsv_name = task2tsv[task]

    splits = {
        'train': f"mtg-jamendo-dataset/data/splits/split-0/autotagging_{tsv_name}-train.tsv",
        'val': f"mtg-jamendo-dataset/data/splits/split-0/autotagging_{tsv_name}-validation.tsv",
        'test': f"mtg-jamendo-dataset/data/splits/split-0/autotagging_{tsv_name}-test.tsv",
    }
    prefix = os.path.join(output_dir, "audio")

    os.makedirs(output_dir, exist_ok=True)

    for split, rel_path in splits.items():
        tsv_path = os.path.join(output_dir, rel_path)
        tsv_list = load_tsv(tsv_path)
        out_list = []

        # Probe audio files in parallel; drop entries that failed to decode.
        worker = partial(process_item, prefix=prefix)
        with Pool(processes=cpu_count()) as pool:
            for result in tqdm(pool.imap(worker, tsv_list), total=len(tsv_list),
                               desc=f"Processing {task} {split}"):
                if result is not None:
                    out_list.append(result)

        out_file = os.path.join(output_dir, f"{task}.{split}.jsonl")
        write_jsonl(out_list, out_file)
        print(f"{split} done: {len(out_list)} items written to {out_file}")


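# Expected layout under the output directory (as assumed by the paths above):
#   data/MTG/
#     audio/                                     decoded .low.flac files, mirroring the TSV paths
#     mtg-jamendo-dataset/data/splits/split-0/   the official split TSVs
# The JSONL manifests are written to data/MTG/<task>.<split>.jsonl.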
if __name__ == "__main__":
    output_base = "data/MTG"
    tasks = ['MTGTop50', 'MTGGenre', 'MTGInstrument', 'MTGMood']
    for task in tasks:
        convert_mtg_to_jsonl(output_base, task)
    print("All tasks completed.")