indraroy
committed on
Commit
·
f927803
1
Parent(s):
ee2dcc7
Fix loader path for HF dataset
Browse files- isonetpp_loader.py +43 -66
isonetpp_loader.py
CHANGED
|
@@ -1,92 +1,67 @@
|
|
| 1 |
# isonetpp_loader.py
|
| 2 |
from __future__ import annotations
|
| 3 |
import os
|
| 4 |
-
import
|
| 5 |
-
from typing import Literal, Optional, Dict
|
| 6 |
from huggingface_hub import hf_hub_download
|
| 7 |
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
f"Import error: {e}"
|
| 14 |
-
)
|
| 15 |
-
|
| 16 |
-
Mode = Literal["train", "val", "test", "Extra_test_300"]
|
| 17 |
-
Size = Literal["small", "large"]
|
| 18 |
-
Name = Literal["aids240k", "mutag240k", "ptc_fm240k", "ptc_fr240k", "ptc_mm240k", "ptc_mr240k"]
|
| 19 |
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
-
def
|
| 25 |
-
return "
|
| 26 |
|
| 27 |
def _ensure_paths(
|
| 28 |
repo_id: str,
|
| 29 |
-
mode:
|
| 30 |
-
dataset_name:
|
| 31 |
-
dataset_size:
|
| 32 |
local_root: Optional[str] = None,
|
| 33 |
) -> Dict[str, str]:
|
| 34 |
-
"""
|
| 35 |
-
Download the three files needed for a given split into local cache (or local_root if set):
|
| 36 |
-
- <mode>_<name><pairs>_query_subgraphs.pkl
|
| 37 |
-
- <mode>_<name><pairs>_rel_nx_is_subgraph_iso.pkl
|
| 38 |
-
- <name><pairs>_corpus_subgraphs.pkl (lives next to splits in our layout under `corpus/`)
|
| 39 |
-
Returns local file paths.
|
| 40 |
-
"""
|
| 41 |
-
prefix = _mode_prefix(mode)
|
| 42 |
-
pairs = _pair_count(dataset_size)
|
| 43 |
|
| 44 |
-
|
| 45 |
-
#
|
| 46 |
-
|
| 47 |
-
# splits/<mode>/<mode>_<name>_rel_nx_is_subgraph_iso.pkl
|
| 48 |
-
query_fname = f"{prefix}_{dataset_name}_{'query_subgraphs' if '_' in dataset_name else 'query_subgraphs'}.pkl"
|
| 49 |
-
rel_fname = f"{prefix}_{dataset_name}_{'rel_nx_is_subgraph_iso' if '_' in dataset_name else 'rel_nx_is_subgraph_iso'}.pkl"
|
| 50 |
pairs = "80k" if dataset_size == "small" else "240k"
|
| 51 |
-
size_folder = "small_dataset" if dataset_size == "small" else "large_dataset"
|
| 52 |
|
| 53 |
-
# Your actual saved names were like: train_aids240k_query_subgraphs.pkl (without extra underscore)
|
| 54 |
-
# So fix the minor formatting exactly:
|
| 55 |
query_fname = f"{prefix}_{dataset_name}{pairs}_query_subgraphs.pkl"
|
| 56 |
rel_fname = f"{prefix}_{dataset_name}{pairs}_rel_nx_is_subgraph_iso.pkl"
|
| 57 |
corpus_fname = f"{dataset_name}{pairs}_corpus_subgraphs.pkl"
|
| 58 |
|
|
|
|
|
|
|
|
|
|
| 59 |
|
|
|
|
| 60 |
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
repo_corpus_path = f"{size_folder}/corpus/{corpus_fname}"
|
| 65 |
-
|
| 66 |
-
# Download to cache (or local_root if provided)
|
| 67 |
-
kwargs = dict(repo_id=repo_id, repo_type="dataset")
|
| 68 |
-
query_path = hf_hub_download(filename=repo_query_path, **kwargs, local_dir=local_root, local_dir_use_symlinks=False)
|
| 69 |
-
rel_path = hf_hub_download(filename=repo_rel_path, **kwargs, local_dir=local_root, local_dir_use_symlinks=False)
|
| 70 |
-
corpus_path= hf_hub_download(filename=repo_corpus_path,**kwargs, local_dir=local_root, local_dir_use_symlinks=False)
|
| 71 |
|
| 72 |
return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
|
| 73 |
|
| 74 |
def load_isonetpp_benchmark(
|
| 75 |
repo_id: str = "structlearning/isonetpp-benchmark",
|
| 76 |
-
mode:
|
| 77 |
-
dataset_name:
|
| 78 |
-
dataset_size:
|
| 79 |
batch_size: int = 128,
|
| 80 |
data_type: str = "pyg",
|
| 81 |
device: Optional[str] = None,
|
| 82 |
download_root: Optional[str] = None,
|
| 83 |
):
|
| 84 |
-
"""
|
| 85 |
-
Returns: an initialized SubgraphIsomorphismDataset with files downloaded from the HF Hub.
|
| 86 |
-
"""
|
| 87 |
-
# Map to your class constants
|
| 88 |
mode_map = {
|
| 89 |
-
"train": TRAIN_MODE, "val": VAL_MODE, "test": TEST_MODE,
|
|
|
|
| 90 |
}
|
| 91 |
mode_norm = mode_map.get(mode, mode)
|
| 92 |
|
|
@@ -95,25 +70,27 @@ def load_isonetpp_benchmark(
|
|
| 95 |
mode=mode_norm,
|
| 96 |
dataset_name=dataset_name,
|
| 97 |
dataset_size=dataset_size,
|
| 98 |
-
local_root=download_root
|
| 99 |
)
|
| 100 |
|
| 101 |
-
#
|
| 102 |
-
#
|
| 103 |
-
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
| 105 |
|
| 106 |
dataset_config = dict(
|
| 107 |
mode=mode_norm,
|
| 108 |
-
dataset_name=dataset_name,
|
| 109 |
dataset_size=dataset_size,
|
| 110 |
batch_size=batch_size,
|
| 111 |
data_type=data_type,
|
| 112 |
dataset_base_path=dataset_base_path,
|
|
|
|
| 113 |
experiment=None,
|
| 114 |
-
dataset_path_override="large_dataset" if dataset_size=="large" else "small_dataset",
|
| 115 |
device=device,
|
| 116 |
)
|
| 117 |
|
| 118 |
-
|
| 119 |
-
return ds
|
|
|
|
| 1 |
# isonetpp_loader.py
|
| 2 |
from __future__ import annotations
|
| 3 |
import os
|
| 4 |
+
from typing import Optional, Dict
|
|
|
|
| 5 |
from huggingface_hub import hf_hub_download
|
| 6 |
|
| 7 |
+
from subiso_dataset import (
|
| 8 |
+
SubgraphIsomorphismDataset,
|
| 9 |
+
TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE,
|
| 10 |
+
GMN_DATA_TYPE, PYG_DATA_TYPE
|
| 11 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
+
# Normalize names users pass ("aids" or "aids240k" → stored names are aids240k)
|
| 14 |
+
def _normalize_name(name: str) -> str:
|
| 15 |
+
if name.endswith("240k") or name.endswith("80k"):
|
| 16 |
+
return name
|
| 17 |
+
# assume large dataset default = 240k
|
| 18 |
+
return name + "240k"
|
| 19 |
|
| 20 |
+
def _folder(dataset_size: str) -> str:
|
| 21 |
+
return "small_dataset" if dataset_size == "small" else "large_dataset"
|
| 22 |
|
| 23 |
def _ensure_paths(
    repo_id: str,
    mode: str,
    dataset_name: str,
    dataset_size: str,
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """Download the three files needed for one split and return local paths.

    Repo layout (per the saved naming, e.g. train_aids240k_query_subgraphs.pkl):
      - <folder>/splits/<mode>/<prefix>_<name><pairs>_query_subgraphs.pkl
      - <folder>/splits/<mode>/<prefix>_<name><pairs>_rel_nx_is_subgraph_iso.pkl
      - <folder>/corpus/<name><pairs>_corpus_subgraphs.pkl

    Args:
        repo_id: HF Hub dataset repo id.
        mode: split name as stored in the repo (e.g. "train", "val", "test").
        dataset_name: dataset name, with or without a "240k"/"80k" suffix.
        dataset_size: "small" (80k pairs) or "large" (240k pairs).
        local_root: optional directory to download into instead of the HF cache.

    Returns:
        Dict with local file paths under keys "query", "rel", "corpus".
    """
    folder = _folder(dataset_size)  # "small_dataset" or "large_dataset"
    pairs = "80k" if dataset_size == "small" else "240k"
    # All test-like splits share the "test" filename prefix in the repo.
    prefix = "test" if "test" in mode.lower() else mode

    # Stored filenames embed the pair count exactly once. Previously the name
    # was passed through _normalize_name() first (forcing a 240k/80k suffix)
    # and then `pairs` was appended again, yielding doubled suffixes like
    # "aids240k240k". Strip any suffix the caller already included so the
    # size-correct `pairs` is appended exactly once.
    base_name = dataset_name
    for suffix in ("240k", "80k"):
        if base_name.endswith(suffix):
            base_name = base_name[: -len(suffix)]
            break

    query_fname = f"{prefix}_{base_name}{pairs}_query_subgraphs.pkl"
    rel_fname = f"{prefix}_{base_name}{pairs}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{base_name}{pairs}_corpus_subgraphs.pkl"

    repo_query_path = f"{folder}/splits/{mode}/{query_fname}"
    repo_rel_path = f"{folder}/splits/{mode}/{rel_fname}"
    repo_corpus_path = f"{folder}/corpus/{corpus_fname}"

    # Download to the HF cache, or to local_root when provided.
    kwargs = dict(repo_id=repo_id, repo_type="dataset", local_dir=local_root, local_dir_use_symlinks=False)

    query_path = hf_hub_download(filename=repo_query_path, **kwargs)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs)

    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
|
| 51 |
|
| 52 |
def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: str = "train",
    dataset_name: str = "aids",
    dataset_size: str = "large",
    batch_size: int = 128,
    data_type: str = "pyg",
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    """Fetch one ISONET++ benchmark split from the HF Hub and return an
    initialized SubgraphIsomorphismDataset over the downloaded files.

    Args:
        repo_id: HF Hub dataset repo id.
        mode: "train", "val", "test", or "Extra_test_300" (case variants accepted).
        dataset_name: dataset name, with or without the "240k"/"80k" suffix.
        dataset_size: "small" or "large".
        batch_size: batch size forwarded to the dataset class.
        data_type: data representation forwarded to the dataset class.
        device: optional device string forwarded to the dataset class.
        download_root: optional directory to download into instead of the HF cache.
    """
    # Translate user-facing split labels into the dataset-class constants.
    mode_map = {
        "train": TRAIN_MODE, "val": VAL_MODE, "test": TEST_MODE,
        "extra_test_300": BROAD_TEST_MODE, "Extra_test_300": BROAD_TEST_MODE
    }
    mode_norm = mode_map.get(mode, mode)

    paths = _ensure_paths(
        repo_id=repo_id,
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root
    )

    # Downloaded structure:
    #   <cache>/.../<folder>/splits/<mode>/<files>
    #   <cache>/.../<folder>/corpus/<files>
    # so the dataset base path is the parent of <folder>, i.e. two levels
    # above the directory holding the query file.
    splits_dir = os.path.dirname(os.path.dirname(paths["query"]))  # .../<folder>/splits
    dataset_base_path = os.path.dirname(splits_dir)                # .../<folder>

    config = {
        "mode": mode_norm,
        "dataset_name": _normalize_name(dataset_name),
        "dataset_size": dataset_size,
        "batch_size": batch_size,
        "data_type": data_type,
        "dataset_base_path": dataset_base_path,
        # Point the dataset class at the size-specific folder explicitly.
        "dataset_path_override": _folder(dataset_size),
        "experiment": None,
        "device": device,
    }

    return SubgraphIsomorphismDataset(**config)
|
|
|