File size: 1,100 Bytes
830b470
 
 
 
 
f71ba81
830b470
 
 
 
 
 
f71ba81
 
 
 
830b470
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
"""
Pulls raw samples of 10k each from the [cited in README] datasets used in this project.
In the final version of the training data, a lot of the example outputs are tuned, and they are all merged into a single file.

HuggingFace seems to have disabled this functionality.
Currently investigating a workaround.
"""

import json
from datasets import load_dataset

# Dataset sources to sample from: name -> (HF repo id, optional config, split).
targets = dict(
    mediasum=("nbroad/mediasum", None, "train"),
    dialogsum=("knkarthick/dialogsum", None, "train"),
    squality=("mattercalm/squality", None, "train"),
    msmarco_corpus=("Hyukkyu/beir-msmarco", "corpus", "train"),
)

for name, (repo, config, split) in targets.items():
    # Load with the generic loader (no trust_remote_code). A config of None
    # must be omitted entirely, not passed positionally.
    if config:
        ds = load_dataset(repo, config, split=split)
    else:
        ds = load_dataset(repo, split=split)

    # Deterministic shuffle, then take up to 10k examples.
    # min() guards against datasets with fewer than 10k rows, where
    # select(range(10_000)) would raise an IndexError.
    sample_size = min(10_000, len(ds))
    small = ds.shuffle(seed=42).select(range(sample_size))

    out = f"{name}_10k.jsonl"
    with open(out, "w", encoding="utf-8") as f:
        for example in small:
            # ensure_ascii=False keeps non-ASCII text readable in the dump;
            # the file is already opened as UTF-8.
            f.write(json.dumps(example, ensure_ascii=False) + "\n")