Update files from the datasets library (from 1.5.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.5.0
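The change below instruments the ELI5 builder script with logger.info progress messages. They only surface when the datasets logging verbosity is INFO or lower; a minimal sketch of how a user might enable that, assuming the datasets.utils.logging helpers shipped with the library:

# Sketch only: raise verbosity so the new logger.info() messages are printed.
# Assumes the datasets.utils.logging helpers bundled with the datasets library.
from datasets.utils import logging as ds_logging

ds_logging.set_verbosity_info()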
eli5.py
CHANGED
@@ -116,9 +116,9 @@ def _open_compressed_file(f_name, f_type):
 # download a file, extract posts from desired subreddit, then remove from disk
 def _download_and_select_lines(dl_manager, f_url, mode, st_time):
     # download and pre-process original posts
-
+    logger.info("downloading {} {:.2f}".format(f_url, time() - st_time))
     f_downloaded_path = dl_manager.download(f_url)
-
+    logger.info("decompressing and filtering {} {:.2f}".format(f_url, time() - st_time))
     f, fh = _open_compressed_file(f_downloaded_path, f_url.split(".")[-1])
     lines = dict([(name, []) for name in _SUB_REDDITS])
     for line in f:
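Both new messages report wall-clock seconds elapsed since the st_time baseline passed into _download_and_select_lines. A self-contained sketch of the same idiom; the URL and logger name are illustrative, not from eli5.py:

# Illustrative sketch of the elapsed-time logging pattern used by the patch.
import logging
from time import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("eli5_sketch")

st_time = time()
f_url = "https://example.com/dump.xz"  # hypothetical URL
# a long-running step (download, decompress, ...) would happen here
logger.info("downloading {} {:.2f}".format(f_url, time() - st_time))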
@@ -131,7 +131,7 @@ def _download_and_select_lines(dl_manager, f_url, mode, st_time):
     os.remove(f_downloaded_path)
     os.remove(f_downloaded_path + ".json")
     os.remove(f_downloaded_path + ".lock")
-
+    logger.info("tokenizing and selecting {} {:.2f}".format(f_url, time() - st_time))
     processed_items = dict([(name, []) for name in _SUB_REDDITS])
     if mode == "submissions":
         key_list = ["id", "score", "url", "title", "selftext", "subreddit"]
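The context here shows the script reclaiming disk space as it goes: besides the downloaded archive itself, it deletes the .json metadata and .lock sidecar files that the datasets download cache keeps next to it. A sketch of the same cleanup with an illustrative path:

# Sketch: remove a cached download plus its cache sidecar files.
import os

f_downloaded_path = "/tmp/cache/abcd1234"  # hypothetical cache path
for p in [f_downloaded_path, f_downloaded_path + ".json", f_downloaded_path + ".lock"]:
    if os.path.exists(p):  # guard added for the sketch; eli5.py removes unconditionally
        os.remove(p)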
@@ -147,7 +147,9 @@ def _download_and_select_lines(dl_manager, f_url, mode, st_time):
             else:
                 reddit_res[k] = line[k]
             processed_items[name] += [reddit_res]
-
+    logger.info(
+        "Total found {} {} {:.2f}".format(sum([len(ls) for ls in processed_items.values()]), mode, time() - st_time)
+    )
     return processed_items
 
 
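The multi-line logger.info added in this hunk totals items across the per-subreddit dict before the function returns. A small sketch of that counting idiom; the subreddit names and items are illustrative:

# Sketch of the aggregate count behind the new "Total found" message.
processed_items = {
    "explainlikeimfive": [{"id": "a1"}],
    "askscience": [{"id": "b1"}, {"id": "b2"}],
}
total = sum([len(ls) for ls in processed_items.values()])
print(total)  # 3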
@@ -190,7 +192,7 @@ def _download_and_filter_reddit(dl_manager, start_year=2011, start_month=7, end_
                     for dct in processed_submissions[name]:
                         qa_dict[name][dct["id"]] = dct
             else:
-
+                logger.info("Could not find submissions dump file for year {:4d} month {:2d}".format(year, month))
     # then all answers
     for year in range(start_year, end_year + 1):
         start_mth = start_month if year == start_year else 1
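The message added here fires when a month's submissions dump is missing. The surrounding context sweeps (year, month) pairs, clamping the first month of the first year; a sketch of that sweep and of the {:4d}/{:2d} formatting. The end_* values and the end_mth clamp are assumptions for illustration (the signature above is truncated); only the start_* defaults come from the diff:

# Sketch of the year/month sweep; end_year/end_month and the end_mth clamp
# are assumed for illustration, only the start_* defaults are from the source.
start_year, start_month = 2011, 7
end_year, end_month = 2012, 3  # hypothetical
for year in range(start_year, end_year + 1):
    start_mth = start_month if year == start_year else 1
    end_mth = end_month if year == end_year else 12  # assumed symmetric clamp
    for month in range(start_mth, end_mth + 1):
        print("year {:4d} month {:2d}".format(year, month))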
@@ -209,7 +211,7 @@ def _download_and_filter_reddit(dl_manager, start_year=2011, start_month=7, end_
                         merged_comments += 1
                         qa_dict[name][did]["comments"] = qa_dict[name][did].get("comments", []) + [dct]
             else:
-
+                logger.info("Could not find comments dump file for year {:4d} month {:2d}".format(year, month))
     # then post-process
     res = {}
     for name in _SUB_REDDITS:
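The last hunk logs missing comments dumps; just above it, each comment dict is merged under its parent submission with a get-or-default append. A sketch of that idiom with made-up ids and fields:

# Sketch of the comment-merge idiom: create the "comments" list on first use.
qa_dict = {"explainlikeimfive": {"abc123": {"id": "abc123", "title": "..."}}}
name, did = "explainlikeimfive", "abc123"
dct = {"id": "def456", "body": "an answer"}
qa_dict[name][did]["comments"] = qa_dict[name][did].get("comments", []) + [dct]
print(qa_dict[name][did]["comments"])  # [{'id': 'def456', 'body': 'an answer'}]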
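After upgrading, the dataset loads as before, and building it (which downloads and filters the Reddit dumps) is where the new progress messages appear. A hedged usage sketch; the split name is an assumption, check the dataset card for the exact splits:

# pip install datasets==1.5.0
# Note: building ELI5 triggers the full Reddit dump download and filtering.
from datasets import load_dataset

eli5 = load_dataset("eli5", split="train_eli5")  # split name assumed
print(eli5[0])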