indraroy committed
Commit 7018700 · Parent: f927803
Fix loader path for HF dataset

isonetpp_loader.py  CHANGED  (+78 -37)
@@ -7,41 +7,75 @@ from huggingface_hub import hf_hub_download
 from subiso_dataset import (
     SubgraphIsomorphismDataset,
     TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE,
-    GMN_DATA_TYPE, PYG_DATA_TYPE
 )
 
-#
-def _normalize_name(name):
-    if name.endswith("240k"):
-        return name
-    # assume large dataset default = 240k
-    return name + "240k"
+# ----------------------------
+# Helpers
+# ----------------------------
 
-def _folder_for_size(dataset_size):
+def _pairs_for_size(dataset_size: str) -> str:
+    return "80k" if dataset_size == "small" else "240k"
+
+def _folder_for_size(dataset_size: str) -> str:
     return "small_dataset" if dataset_size == "small" else "large_dataset"
 
+def _normalize_name(base_name: str, dataset_size: str) -> str:
+    """
+    Accepts 'aids' or 'aids240k'.
+    If the user passes a bare name, append the pair count; if they already
+    passed '...80k'/'...240k', keep it as-is.
+    """
+    pairs = _pairs_for_size(dataset_size)
+    if base_name.endswith(("80k", "240k")):
+        return base_name
+    return f"{base_name}{pairs}"
+
+def _mode_prefix_and_dir(mode: str) -> tuple[str, str]:
+    """
+    The file prefix uses 'test' whenever the mode contains 'test' (repo
+    convention). The split directory only has train/val/test, so
+    Extra_test_300 maps to 'test'.
+    """
+    prefix = "test" if "test" in mode.lower() else mode
+    mode_dir = "test" if "test" in mode.lower() else mode  # maps Extra_test_300 -> test
+    return prefix, mode_dir
+
+# ----------------------------
+# Path resolution + downloads
+# ----------------------------
+
 def _ensure_paths(
     repo_id: str,
     mode: str,
-    dataset_name: str,
-    dataset_size: str,
+    dataset_name: str,   # can be 'aids' or 'aids240k'
+    dataset_size: str,   # 'small' | 'large'
     local_root: Optional[str] = None,
 ) -> Dict[str, str]:
+    """
+    Download the three needed files into the cache (or local_root if set):
+      - splits/<mode_dir>/<prefix>_<base>_query_subgraphs.pkl
+      - splits/<mode_dir>/<prefix>_<base>_rel_nx_is_subgraph_iso.pkl
+      - corpus/<base>_corpus_subgraphs.pkl
+    where <base> is the normalized dataset name (includes 80k/240k exactly once).
+    """
+    pairs = _pairs_for_size(dataset_size)
+    folder = _folder_for_size(dataset_size)             # "large_dataset" or "small_dataset"
+    base = _normalize_name(dataset_name, dataset_size)  # e.g., "aids240k" (no double-append)
+    prefix, mode_dir = _mode_prefix_and_dir(mode)
 
-
-
-
-
-
-    query_fname = f"{prefix}_{dataset_name}{pairs}_query_subgraphs.pkl"
-    rel_fname = f"{prefix}_{dataset_name}{pairs}_rel_nx_is_subgraph_iso.pkl"
-    corpus_fname = f"{dataset_name}{pairs}_corpus_subgraphs.pkl"
+    # exact filenames used in the repo
+    query_fname = f"{prefix}_{base}_query_subgraphs.pkl"
+    rel_fname = f"{prefix}_{base}_rel_nx_is_subgraph_iso.pkl"
+    corpus_fname = f"{base}_corpus_subgraphs.pkl"
 
-    repo_query_path = f"{folder}/splits/{
-    repo_rel_path = f"{folder}/splits/{
+    repo_query_path = f"{folder}/splits/{mode_dir}/{query_fname}"
+    repo_rel_path = f"{folder}/splits/{mode_dir}/{rel_fname}"
     repo_corpus_path = f"{folder}/corpus/{corpus_fname}"
 
-    kwargs = dict(
+    kwargs = dict(
+        repo_id=repo_id,
+        repo_type="dataset",
+        local_dir=local_root,
+        local_dir_use_symlinks=False,
+    )
 
     query_path = hf_hub_download(filename=repo_query_path, **kwargs)
     rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
@@ -49,19 +83,27 @@ def _ensure_paths(
 
     return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
 
+# ----------------------------
+# Public entrypoint
+# ----------------------------
+
 def load_isonetpp_benchmark(
     repo_id: str = "structlearning/isonetpp-benchmark",
-    mode: str = "train",
-    dataset_name: str = "aids",
-    dataset_size: str = "large",
+    mode: str = "train",          # "train" | "val" | "test" | "Extra_test_300"
+    dataset_name: str = "aids",   # "aids" or "aids240k" (same for mutag/ptc_*)
+    dataset_size: str = "large",  # "small" | "large"
     batch_size: int = 128,
-    data_type: str = "pyg",
+    data_type: str = "pyg",       # "pyg" or "gmn"
     device: Optional[str] = None,
     download_root: Optional[str] = None,
 ):
+    # Map the user-facing mode to the dataset class constants
     mode_map = {
-        "train": TRAIN_MODE,
-        "
+        "train": TRAIN_MODE,
+        "val": VAL_MODE,
+        "test": TEST_MODE,
+        "extra_test_300": BROAD_TEST_MODE,
+        "Extra_test_300": BROAD_TEST_MODE,
     }
     mode_norm = mode_map.get(mode, mode)
 
@@ -70,25 +112,24 @@ def load_isonetpp_benchmark(
         mode=mode_norm,
         dataset_name=dataset_name,
         dataset_size=dataset_size,
-        local_root=download_root
+        local_root=download_root,
     )
 
-    #
-    # <
-    #
-    #
-
-
-    dataset_base_path = os.path.dirname(base_path)  # .../<folder>
+    # paths["query"] points to .../<folder>/splits/<mode_dir>/<file>
+    # dataset_base_path must be the parent "<folder>" so that subiso_dataset finds:
+    #   dataset_base_path/{splits/<mode_dir>/..., corpus/...}
+    splits_dir = os.path.dirname(paths["query"])  # .../<folder>/splits/<mode_dir>
+    folder_dir = os.path.dirname(splits_dir)      # .../<folder>
+    dataset_base_path = folder_dir
 
     dataset_config = dict(
         mode=mode_norm,
-        dataset_name=_normalize_name(dataset_name),
+        dataset_name=_normalize_name(dataset_name, dataset_size),  # ensure '...240k' appears exactly once
        dataset_size=dataset_size,
         batch_size=batch_size,
         data_type=data_type,
         dataset_base_path=dataset_base_path,
-        dataset_path_override=
+        dataset_path_override=_folder_for_size(dataset_size),  # critical: "large_dataset"/"small_dataset"
         experiment=None,
         device=device,
     )
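
The effect of the path fix is easiest to check on one concrete configuration. The sketch below is standalone Python: the two helper bodies are copied from the new side of the diff, while the configuration values ("aids", "large", "Extra_test_300") are illustrative rather than taken from the commit.

# Standalone check of the fixed path logic (helper bodies copied from the
# diff above; the chosen configuration is illustrative).

def _pairs_for_size(dataset_size: str) -> str:
    return "80k" if dataset_size == "small" else "240k"

def _normalize_name(base_name: str, dataset_size: str) -> str:
    pairs = _pairs_for_size(dataset_size)
    if base_name.endswith(("80k", "240k")):
        return base_name
    return f"{base_name}{pairs}"

# The 80k/240k suffix is appended exactly once:
assert _normalize_name("aids", "large") == "aids240k"
assert _normalize_name("aids240k", "large") == "aids240k"  # no double-append

# Any test-like mode is routed to the single 'test' split directory:
mode = "Extra_test_300"
prefix = mode_dir = "test" if "test" in mode.lower() else mode

# Resolved repo path for the query pickle:
base = _normalize_name("aids", "large")
print(f"large_dataset/splits/{mode_dir}/{prefix}_{base}_query_subgraphs.pkl")
# -> large_dataset/splits/test/test_aids240k_query_subgraphs.pkl

The second assert is the regression the commit message points at: a caller that already passes a suffixed name such as "aids240k" no longer ends up with "aids240k240k" in the file path.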
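
For reference, a usage sketch of the updated entrypoint. It assumes isonetpp_loader.py and its subiso_dataset dependency are importable and that the structlearning/isonetpp-benchmark repo is reachable; the keyword names match the signature in the diff, and the variable name dataset is illustrative since the diff does not show the return statement.

# Usage sketch (assumes isonetpp_loader.py and subiso_dataset are on the
# Python path and the Hub repo is reachable).
from isonetpp_loader import load_isonetpp_benchmark

dataset = load_isonetpp_benchmark(
    mode="Extra_test_300",  # mapped to BROAD_TEST_MODE by mode_map
    dataset_name="aids",    # normalized to "aids240k" for the large split
    dataset_size="large",
    batch_size=128,
    data_type="pyg",        # or "gmn"
    device="cpu",
    download_root=None,     # None -> default huggingface_hub cache
)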
|