fix dataset loading script
binding_affinity.py (CHANGED, +14 -24)
@@ -15,7 +15,6 @@
 """TODO: A dataset of protein sequences, ligand SMILES and binding affinities."""
 
 import huggingface_hub
-import pandas as pd
 import os
 import pyarrow.parquet as pq
 import datasets
@@ -92,8 +91,10 @@ class BindingAffinity(datasets.ArrowBasedBuilder):
         features = datasets.Features(
             {
                 "seq": datasets.Value("string"),
-                "
+                "smiles": datasets.Value("string"),
+                "affinity_uM": datasets.Value("float"),
                 "neg_log10_affinity_M": datasets.Value("float"),
+                "smiles_can": datasets.Value("string"),
                 "affinity": datasets.Value("float"),
                 # These are the features of your dataset like images, labels ...
             }
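For reference, the schema after this change can be exercised directly; datasets.Features.encode_example validates a raw dict against the declared types. The values below are illustrative placeholders, not taken from the dataset:

import datasets

features = datasets.Features({
    "seq": datasets.Value("string"),
    "smiles": datasets.Value("string"),
    "affinity_uM": datasets.Value("float"),
    "neg_log10_affinity_M": datasets.Value("float"),
    "smiles_can": datasets.Value("string"),
    "affinity": datasets.Value("float"),
})

# Hypothetical record; 1.5 uM corresponds to -log10(1.5e-6 M) ~= 5.82
example = features.encode_example({
    "seq": "MTEYKLVVVG",                     # placeholder protein sequence
    "smiles": "CC(=O)Oc1ccccc1C(=O)O",
    "affinity_uM": 1.5,
    "neg_log10_affinity_M": 5.82,
    "smiles_can": "CC(=O)Oc1ccccc1C(=O)O",   # canonicalized SMILES
    "affinity": 5.82,                        # semantics of this column are not shown in the diff
})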
@@ -124,16 +125,17 @@ class BindingAffinity(datasets.ArrowBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         files = dl_manager.download_and_extract(_URLs)
+        print(files)
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
+                name=datasets.Split.TRAIN,
                 gen_kwargs={
-
+                    'filepath': files["default"],
                 },
             ),
             datasets.SplitGenerator(
-                name=
+                name='no_kras',
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": files["no_kras"],
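With the split generators fixed, the script exposes a train split read from files["default"] and a no_kras split read from files["no_kras"]; each gen_kwargs dict is unpacked into the builder's table generator. A usage sketch, with a placeholder repo id:

from datasets import load_dataset

ds = load_dataset("user/binding_affinity")  # hypothetical repo id
print(ds["train"])    # split built from files["default"]
print(ds["no_kras"])  # split built from files["no_kras"]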
@@ -142,24 +144,12 @@ class BindingAffinity(datasets.ArrowBasedBuilder):
 
         ]
 
-    def _generate_examples(
-        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    ):
-        """ Yields examples as (key, example) tuples. """
-        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is here for legacy reason (tfds) and is not important in itself.
-
-        df = pd.read_parquet(filepath)
-        for k, row in df.iterrows():
-            yield k, {
-                "seq": row["seq"],
-                "smiles_can": row["smiles_can"],
-                "neg_log10_affinity_M": row["neg_log10_affinity_M"],
-                "affinity_uM": row["affinity_uM"],
-            }
-
     def _generate_tables(
-        self, filepath
+        self, filepath
     ):
-
-
+        from pyarrow import fs
+        local = fs.LocalFileSystem()
+
+        for i, f in enumerate([filepath]):
+            print(f)
+            yield i, pq.read_table(f,filesystem=local)
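The new _generate_tables loads each parquet file as a single Arrow table, which is the simplest correct shape for an ArrowBasedBuilder; passing fs.LocalFileSystem() explicitly presumably sidesteps filesystem inference on the cached path, though pq.read_table defaults to the local filesystem anyway. For very large files, a variant that streams fixed-size record batches would bound peak memory. A sketch of that alternative, not part of this commit:

import pyarrow as pa
import pyarrow.parquet as pq

def _generate_tables(self, filepath):
    # Stream record batches instead of materializing the whole file at once.
    parquet_file = pq.ParquetFile(filepath)
    for i, batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
        yield i, pa.Table.from_batches([batch])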