# Mirror of https://github.com/openai/gpt-2-output-dataset (synced 2025-08-22)
import os

import requests
from tqdm import tqdm

subdir = 'data'
if not os.path.exists(subdir):
    os.makedirs(subdir)
subdir = subdir.replace('\\','/') # needed for Windows

# Fetch every dataset/split pair: the WebText corpus plus the output samples
# of each GPT-2 model size, with and without top-k 40 truncation.
for ds in [
    'webtext',
    'small-117M',  'small-117M-k40',
    'medium-345M', 'medium-345M-k40',
    'large-762M',  'large-762M-k40',
    'xl-1542M',    'xl-1542M-k40',
]:
    for split in ['train', 'valid', 'test']:
        filename = ds + "." + split + '.jsonl'
        r = requests.get("https://storage.googleapis.com/gpt-2/output-dataset/v1/" + filename, stream=True)

        with open(os.path.join(subdir, filename), 'wb') as f:
            file_size = int(r.headers["content-length"])
            chunk_size = 1000
            with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
                # 1k for chunk_size, since Ethernet packet size is around 1500 bytes
                for chunk in r.iter_content(chunk_size=chunk_size):
                    f.write(chunk)
                    # update by the bytes actually received; the final chunk
                    # is usually shorter than chunk_size
                    pbar.update(len(chunk))
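
# A minimal sketch of how the downloaded files can be read back, assuming each
# .jsonl file holds one JSON record per line with a "text" field (any field
# name beyond "text" would be an assumption about the record layout):
#
#     import json
#
#     with open('data/webtext.test.jsonl') as f:
#         for line in f:
#             record = json.loads(line)  # one document per line
#             print(record['text'][:80])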