README.md CHANGED
@@ -50,17 +50,17 @@ dataset_info:
50
  dtype: string
51
  splits:
52
  - name: train
53
- num_bytes: 5850594255
54
  num_examples: 1880853
55
  - name: test
56
- num_bytes: 308625761
57
  num_examples: 100529
58
  - name: validation
59
- num_bytes: 274563914
60
  num_examples: 89154
61
- download_size: 1963220197
62
- dataset_size: 6433783930
63
- - config_name: go
64
  features:
65
  - name: repository_name
66
  dtype: string
@@ -86,17 +86,17 @@ dataset_info:
86
  dtype: string
87
  splits:
88
  - name: train
89
- num_bytes: 738151570
90
- num_examples: 317832
91
  - name: test
92
- num_bytes: 32286894
93
- num_examples: 14291
94
  - name: validation
95
- num_bytes: 26888423
96
- num_examples: 14242
97
- download_size: 228520726
98
- dataset_size: 797326887
99
- - config_name: java
100
  features:
101
  - name: repository_name
102
  dtype: string
@@ -122,17 +122,17 @@ dataset_info:
122
  dtype: string
123
  splits:
124
  - name: train
125
- num_bytes: 1429270143
126
- num_examples: 454451
127
  - name: test
128
- num_bytes: 82377090
129
- num_examples: 26909
130
  - name: validation
131
- num_bytes: 42358211
132
- num_examples: 15328
133
- download_size: 425659927
134
- dataset_size: 1554005444
135
- - config_name: javascript
136
  features:
137
  - name: repository_name
138
  dtype: string
@@ -158,17 +158,17 @@ dataset_info:
158
  dtype: string
159
  splits:
160
  - name: train
161
- num_bytes: 480285847
162
- num_examples: 123889
163
  - name: test
164
- num_bytes: 24056920
165
- num_examples: 6483
166
  - name: validation
167
- num_bytes: 30168190
168
- num_examples: 8253
169
- download_size: 177637817
170
- dataset_size: 534510957
171
- - config_name: php
172
  features:
173
  - name: repository_name
174
  dtype: string
@@ -194,17 +194,17 @@ dataset_info:
194
  dtype: string
195
  splits:
196
  - name: train
197
- num_bytes: 1532562114
198
- num_examples: 523712
199
  - name: test
200
- num_bytes: 80203721
201
- num_examples: 28391
202
  - name: validation
203
- num_bytes: 78163768
204
- num_examples: 26015
205
- download_size: 507426963
206
- dataset_size: 1690929603
207
- - config_name: python
208
  features:
209
  - name: repository_name
210
  dtype: string
@@ -230,17 +230,17 @@ dataset_info:
230
  dtype: string
231
  splits:
232
  - name: train
233
- num_bytes: 1559643126
234
- num_examples: 412178
235
  - name: test
236
- num_bytes: 84341908
237
- num_examples: 22176
238
  - name: validation
239
- num_bytes: 92154630
240
- num_examples: 23107
241
- download_size: 581272659
242
- dataset_size: 1736139664
243
- - config_name: ruby
244
  features:
245
  - name: repository_name
246
  dtype: string
@@ -266,16 +266,16 @@ dataset_info:
266
  dtype: string
267
  splits:
268
  - name: train
269
- num_bytes: 110681455
270
- num_examples: 48791
271
  - name: test
272
- num_bytes: 5359228
273
- num_examples: 2279
274
  - name: validation
275
- num_bytes: 4830692
276
- num_examples: 2209
277
- download_size: 42266439
278
- dataset_size: 120871375
279
  config_names:
280
  - all
281
  - go
@@ -284,64 +284,6 @@ config_names:
284
  - php
285
  - python
286
  - ruby
287
- configs:
288
- - config_name: all
289
- data_files:
290
- - split: train
291
- path: all/train-*
292
- - split: test
293
- path: all/test-*
294
- - split: validation
295
- path: all/validation-*
296
- default: true
297
- - config_name: go
298
- data_files:
299
- - split: train
300
- path: go/train-*
301
- - split: test
302
- path: go/test-*
303
- - split: validation
304
- path: go/validation-*
305
- - config_name: java
306
- data_files:
307
- - split: train
308
- path: java/train-*
309
- - split: test
310
- path: java/test-*
311
- - split: validation
312
- path: java/validation-*
313
- - config_name: javascript
314
- data_files:
315
- - split: train
316
- path: javascript/train-*
317
- - split: test
318
- path: javascript/test-*
319
- - split: validation
320
- path: javascript/validation-*
321
- - config_name: php
322
- data_files:
323
- - split: train
324
- path: php/train-*
325
- - split: test
326
- path: php/test-*
327
- - split: validation
328
- path: php/validation-*
329
- - config_name: python
330
- data_files:
331
- - split: train
332
- path: python/train-*
333
- - split: test
334
- path: python/test-*
335
- - split: validation
336
- path: python/validation-*
337
- - config_name: ruby
338
- data_files:
339
- - split: train
340
- path: ruby/train-*
341
- - split: test
342
- path: ruby/test-*
343
- - split: validation
344
- path: ruby/validation-*
345
  ---
346
 
347
  # Dataset Card for CodeSearchNet corpus
 
50
  dtype: string
51
  splits:
52
  - name: train
53
+ num_bytes: 5850604083
54
  num_examples: 1880853
55
  - name: test
56
+ num_bytes: 308626333
57
  num_examples: 100529
58
  - name: validation
59
+ num_bytes: 274564382
60
  num_examples: 89154
61
+ download_size: 5117370511
62
+ dataset_size: 6433794798
63
+ - config_name: java
64
  features:
65
  - name: repository_name
66
  dtype: string
 
86
  dtype: string
87
  splits:
88
  - name: train
89
+ num_bytes: 1429272535
90
+ num_examples: 454451
91
  - name: test
92
+ num_bytes: 82377246
93
+ num_examples: 26909
94
  - name: validation
95
+ num_bytes: 42358315
96
+ num_examples: 15328
97
+ download_size: 1060569153
98
+ dataset_size: 1554008096
99
+ - config_name: go
100
  features:
101
  - name: repository_name
102
  dtype: string
 
122
  dtype: string
123
  splits:
124
  - name: train
125
+ num_bytes: 738153234
126
+ num_examples: 317832
127
  - name: test
128
+ num_bytes: 32286998
129
+ num_examples: 14291
130
  - name: validation
131
+ num_bytes: 26888527
132
+ num_examples: 14242
133
+ download_size: 487525935
134
+ dataset_size: 797328759
135
+ - config_name: python
136
  features:
137
  - name: repository_name
138
  dtype: string
 
158
  dtype: string
159
  splits:
160
  - name: train
161
+ num_bytes: 1559645310
162
+ num_examples: 412178
163
  - name: test
164
+ num_bytes: 84342064
165
+ num_examples: 22176
166
  - name: validation
167
+ num_bytes: 92154786
168
+ num_examples: 23107
169
+ download_size: 940909997
170
+ dataset_size: 1736142160
171
+ - config_name: javascript
172
  features:
173
  - name: repository_name
174
  dtype: string
 
194
  dtype: string
195
  splits:
196
  - name: train
197
+ num_bytes: 480286523
198
+ num_examples: 123889
199
  - name: test
200
+ num_bytes: 24056972
201
+ num_examples: 6483
202
  - name: validation
203
+ num_bytes: 30168242
204
+ num_examples: 8253
205
+ download_size: 1664713350
206
+ dataset_size: 534511737
207
+ - config_name: ruby
208
  features:
209
  - name: repository_name
210
  dtype: string
 
230
  dtype: string
231
  splits:
232
  - name: train
233
+ num_bytes: 110681715
234
+ num_examples: 48791
235
  - name: test
236
+ num_bytes: 5359280
237
+ num_examples: 2279
238
  - name: validation
239
+ num_bytes: 4830744
240
+ num_examples: 2209
241
+ download_size: 111758028
242
+ dataset_size: 120871739
243
+ - config_name: php
244
  features:
245
  - name: repository_name
246
  dtype: string
 
266
  dtype: string
267
  splits:
268
  - name: train
269
+ num_bytes: 1532564870
270
+ num_examples: 523712
271
  - name: test
272
+ num_bytes: 80203877
273
+ num_examples: 28391
274
  - name: validation
275
+ num_bytes: 78163924
276
+ num_examples: 26015
277
+ download_size: 851894048
278
+ dataset_size: 1690932671
279
  config_names:
280
  - all
281
  - go
 
284
  - php
285
  - python
286
  - ruby
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  ---
288
 
289
  # Dataset Card for CodeSearchNet corpus
code_search_net.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """CodeSearchNet corpus: proxy dataset for semantic code search"""
18
+
19
+ # TODO: add licensing info in the examples
20
+ # TODO: log richer information (especially while extracting the jsonl.gz files)
21
+ # TODO: enable custom configs; such as: "java+python"
22
+ # TODO: enable fetching examples with a given license, eg: "java_MIT"
23
+
24
+
25
+ import json
26
+ import os
27
+
28
+ import datasets
29
+
30
+
31
+ _CITATION = """\
32
+ @article{husain2019codesearchnet,
33
+ title={{CodeSearchNet} challenge: Evaluating the state of semantic code search},
34
+ author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
35
+ journal={arXiv preprint arXiv:1909.09436},
36
+ year={2019}
37
+ }
38
+ """
39
+
40
+ _DESCRIPTION = """\
41
+ CodeSearchNet corpus contains about 6 million functions from open-source code \
42
+ spanning six programming languages (Go, Java, JavaScript, PHP, Python, and Ruby). \
43
+ The CodeSearchNet Corpus also contains automatically generated query-like \
44
+ natural language for 2 million functions, obtained from mechanically scraping \
45
+ and preprocessing associated function documentation.
46
+ """
47
+
48
+ _HOMEPAGE = "https://github.com/github/CodeSearchNet"
49
+
50
+ _LICENSE = "Various"
51
+
52
+ _DATA_DIR_URL = "data/"
53
+ _AVAILABLE_LANGUAGES = ["python", "java", "javascript", "go", "ruby", "php"]
54
+ _URLs = {language: _DATA_DIR_URL + f"{language}.zip" for language in _AVAILABLE_LANGUAGES}
55
+ # URLs for "all" are just the concatenation of URLs for all languages
56
+ _URLs["all"] = _URLs.copy()
57
+
58
+
59
class CodeSearchNet(datasets.GeneratorBasedBuilder):
    """CodeSearchNet corpus: proxy dataset for semantic code search."""

    VERSION = datasets.Version("1.0.0", "Add CodeSearchNet corpus dataset")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="All available languages: Java, Go, Javascript, Python, PHP, Ruby",
        ),
        datasets.BuilderConfig(
            name="java",
            version=VERSION,
            description="Java language",
        ),
        datasets.BuilderConfig(
            name="go",
            version=VERSION,
            description="Go language",
        ),
        datasets.BuilderConfig(
            name="python",
            version=VERSION,
            # Fixed typo: previously read "Pyhton language".
            description="Python language",
        ),
        datasets.BuilderConfig(
            name="javascript",
            version=VERSION,
            description="Javascript language",
        ),
        datasets.BuilderConfig(
            name="ruby",
            version=VERSION,
            description="Ruby language",
        ),
        datasets.BuilderConfig(
            name="php",
            version=VERSION,
            description="PHP language",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Returns the dataset metadata: features schema, homepage, license and citation.

        Every feature is a string or a sequence of strings; there is no
        default supervised (input, target) key pair.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "repository_name": datasets.Value("string"),
                    "func_path_in_repository": datasets.Value("string"),
                    "func_name": datasets.Value("string"),
                    "whole_func_string": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "func_code_string": datasets.Value("string"),
                    "func_code_tokens": datasets.Sequence(datasets.Value("string")),
                    "func_documentation_string": datasets.Value("string"),
                    "func_documentation_tokens": datasets.Sequence(datasets.Value("string")),
                    "split_name": datasets.Value("string"),
                    "func_code_url": datasets.Value("string"),
                    # TODO - add licensing info in the examples
                }
            ),
            # No default supervised keys
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, test and validation splits.

        Downloads and extracts one zip archive per requested language (all of
        them for the "all" config), then extracts every per-split
        ``<language>_<split>_<i>.jsonl.gz`` file found inside.

        Note: Each extracted archive follows this unusual directory structure:
        ```
        .
        ├── <language_name>           # e.g. python
        │   └── final
        │       └── jsonl
        │           ├── test
        │           │   └── <language_name>_test_0.jsonl.gz
        │           ├── train
        │           │   ├── <language_name>_train_0.jsonl.gz
        │           │   ├── <language_name>_train_1.jsonl.gz
        │           │   ├── ...
        │           │   └── <language_name>_train_n.jsonl.gz
        │           └── valid
        │               └── <language_name>_valid_0.jsonl.gz
        ├── <language_name>_dedupe_definitions_v2.pkl
        └── <language_name>_licenses.pkl
        ```
        """
        data_urls = _URLs[self.config.name]
        if isinstance(data_urls, str):
            # Single-language config: normalize to the same {lang: url} shape as "all".
            data_urls = {self.config.name: data_urls}
        # Download & extract the language archives
        data_dirs = [
            os.path.join(directory, lang, "final", "jsonl")
            for lang, directory in dl_manager.download_and_extract(data_urls).items()
        ]

        # The on-disk split directory is named "valid", not "validation".
        split2dirs = {
            split_name: [os.path.join(directory, split_name) for directory in data_dirs]
            for split_name in ["train", "test", "valid"]
        }

        # Extract the jsonl.gz files of every split so they can be read as plain jsonl.
        split2paths = dl_manager.extract(
            {
                split_name: [
                    os.path.join(directory, entry_name)
                    for directory in split_dirs
                    for entry_name in os.listdir(directory)
                ]
                for split_name, split_dirs in split2dirs.items()
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": split2paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": split2paths["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": split2paths["valid"],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yields the examples by iterating through the available jsonl files.

        Args:
            filepaths: list of paths to extracted jsonl files, one JSON object
                per line.

        Yields:
            (key, example) pairs, where the example dict maps the raw jsonl
            field names (repo, path, code, ...) to the dataset's feature names.
        """
        for file_id_, filepath in enumerate(filepaths):
            with open(filepath, encoding="utf-8") as f:
                for row_id_, row in enumerate(f):
                    # Key of the example = file_id + row_id,
                    # to ensure all examples have a distinct key
                    id_ = f"{file_id_}_{row_id_}"
                    data = json.loads(row)
                    yield id_, {
                        "repository_name": data["repo"],
                        "func_path_in_repository": data["path"],
                        "func_name": data["func_name"],
                        "whole_func_string": data["original_string"],
                        "language": data["language"],
                        "func_code_string": data["code"],
                        "func_code_tokens": data["code_tokens"],
                        "func_documentation_string": data["docstring"],
                        "func_documentation_tokens": data["docstring_tokens"],
                        "split_name": data["partition"],
                        "func_code_url": data["url"],
                    }
all/test-00000-of-00001.parquet → data/go.zip RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a4a2767a0cae74a774f5bdf7b74626c7c17052334a4de25e005e9e45981c72c0
3
- size 96096131
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15d23f01dc2796447e1736263e6830079289d5ef41f09988011afdcf8da6b6e5
3
+ size 487525935
all/train-00000-of-00002.parquet → data/java.zip RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b95cf6899f4a28043cf97f54a521c1dbed1f7e1c2a4813ed90320acf2fecf0aa
3
- size 1007776926
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05f9204b1808413fab30f0e69229e298f6de4ad468279d53a2aa5797e3a78c17
3
+ size 1060569153
data/javascript.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdc743f5af27f90c77584a2d29e2b7f8cecdd00c37b433c385b888ee062936dd
3
+ size 1664713350
go/test-00000-of-00001.parquet → data/php.zip RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cb78d8951e9623bc2460d9b29c3c4152724f28c4f685fc019e0aafa071d2b18f
3
- size 9751451
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3bbf0d1b10010f88b058faea876f1f5471758399e30d58c11f78ff53660ce00
3
+ size 851894048
all/train-00001-of-00002.parquet → data/python.zip RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:07ef6ec80c1f52ea097cafb4843ea26d1442e79e7ce4083c6a8e17eb4f0d225a
3
- size 773741030
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7223c6460bebfa85697b586da91e47bc5d64790a4d60bba5917106458ab6b40e
3
+ size 940909997
all/validation-00000-of-00001.parquet → data/ruby.zip RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f821590fcdee74a234abdec9ba879ba4740834bf6d2d4d5a1ea06d9c74ac8a39
3
- size 85606110
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67aee5812d0f994df745c771c7791483f2b060561495747d424e307af4b342e6
3
+ size 111758028
go/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4e66892c2ce166318c06279f6354b499642c71873e284c11768d0d1105ef25e4
3
- size 211174297
 
 
 
 
go/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4f0f7e5f13463f7cdbf81c6e00067fc67d710abe62ed31ed74b12b5e3083fb41
3
- size 7594978
 
 
 
 
java/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1f39dc48cf7e5d99cfe41c997eca8f2e7291cb570ec4d26e74a76a188eb8fa1c
3
- size 23841883
 
 
 
 
java/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3d54586c650eb9d26ac31864e9e82bc7376f7c988eca5ae61a5c464a17541c12
3
- size 389778264
 
 
 
 
java/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:a665e1b8b97558f345a6154746ae805163182d49005e4e369478541446b5b45a
3
- size 12039780
 
 
 
 
javascript/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8ef888d473f7e9cd6e38c116cd50749743de8a7528e3bcb29f3c45d2f3992580
3
- size 8009748
 
 
 
 
javascript/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e2e97aa6a5682f03c28af4bf6905493fc5ede6beebee972dfdf7be6c25c6e4ab
3
- size 159368481
 
 
 
 
javascript/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2f0ad4ee53bb5adb61539f76ffe43c700804747962ba4430ceffaad0f624bd78
3
- size 10259588
 
 
 
 
php/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:df63b6785f1eba4d61141b14c6fa99aca839977b6d3f4798324310190ea20c09
3
- size 23807167
 
 
 
 
php/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:90a99888faaac76fe4c5b5f1be0efb5c311df89f00ffe3824d8037903371451e
3
- size 460570019
 
 
 
 
php/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1fde2d2ef113b4231549fb1d48a5dfa5aa4e58fec1baa6c229d25b7c4247ddf7
3
- size 23049777
 
 
 
 
python/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3167e79ee7f081d825bf97b96d3a6b2d96428b00f6a98125be943384d8afae5f
3
- size 28744352
 
 
 
 
python/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ad9e3a4ab10c2c1d8926d2b26ca2bfcc3aadda1477ba29a933391f93806b9fed
3
- size 521785752
 
 
 
 
python/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:22eaacb46ed7e74d582409b85692ef63f5a43e99f9395c2eb736b5c8451422bb
3
- size 30742555
 
 
 
 
ruby/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:2be89df4c1d2c58c6b9606de028262a3a12bd3a0046ed478e9b3135d617c233f
3
- size 1943180
 
 
 
 
ruby/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f2f6c43532ac60c922bb96229e752986fd638144ad8d171d09d4561142ad1c10
3
- size 38527953
 
 
 
 
ruby/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5872bc60e85ce36a017d01bf0c5f1706e58f42772110ee84c1e91f002b14c54b
3
- size 1795306