indraroy committed on
Commit · 703037f
1 Parent(s): 7018700
Fix loader path for HF dataset
isonetpp_loader.py  +28 -24
isonetpp_loader.py CHANGED

@@ -21,8 +21,8 @@ def _folder_for_size(dataset_size: str) -> str:

 def _normalize_name(base_name: str, dataset_size: str) -> str:
     """
-    Accepts 'aids' or 'aids240k'.
-    If
+    Accepts 'aids' or 'aids240k' (and similarly for other sets).
+    If bare name -> append pairs; if already has 80k/240k -> keep as-is.
     """
     pairs = _pairs_for_size(dataset_size)
     if base_name.endswith(("80k", "240k")):
@@ -31,11 +31,11 @@ def _normalize_name(base_name: str, dataset_size: str) -> str:

 def _mode_prefix_and_dir(mode: str) -> tuple[str, str]:
     """
-    File prefix uses 'test' when mode contains 'test' (
-    Directory
+    File prefix uses 'test' when mode contains 'test' (repo convention).
+    Directory has train/val/test. Map Extra_test_300 => 'test'.
     """
     prefix = "test" if "test" in mode.lower() else mode
-    mode_dir = "test" if "test" in mode.lower() else mode
+    mode_dir = "test" if "test" in mode.lower() else mode
     return prefix, mode_dir

 # ----------------------------
@@ -45,23 +45,21 @@ def _mode_prefix_and_dir(mode: str) -> tuple[str, str]:
 def _ensure_paths(
     repo_id: str,
     mode: str,
-    dataset_name: str,  #
+    dataset_name: str,  # 'aids' or 'aids240k'
     dataset_size: str,  # 'small' | 'large'
     local_root: Optional[str] = None,
 ) -> Dict[str, str]:
     """
     Download the three files needed into cache (or local_root if set):
-      - splits/<mode_dir>/<prefix>_<base>_query_subgraphs.pkl
-      - splits/<mode_dir>/<prefix>_<base>_rel_nx_is_subgraph_iso.pkl
-      - corpus/<base>_corpus_subgraphs.pkl
-    where <base> is
+      - large_dataset/splits/<mode_dir>/<prefix>_<base>_query_subgraphs.pkl
+      - large_dataset/splits/<mode_dir>/<prefix>_<base>_rel_nx_is_subgraph_iso.pkl
+      - large_dataset/corpus/<base>_corpus_subgraphs.pkl
+    where <base> is normalized (contains 80k/240k exactly once).
     """
-
-
-    base = _normalize_name(dataset_name, dataset_size)  # e.g., "aids240k" (no double-append)
+    folder = _folder_for_size(dataset_size)  # "large_dataset" or "small_dataset"
+    base = _normalize_name(dataset_name, dataset_size)  # e.g., "aids240k"
     prefix, mode_dir = _mode_prefix_and_dir(mode)

-    # exact filenames used in your repo
     query_fname = f"{prefix}_{base}_query_subgraphs.pkl"
     rel_fname = f"{prefix}_{base}_rel_nx_is_subgraph_iso.pkl"
     corpus_fname = f"{base}_corpus_subgraphs.pkl"
@@ -97,7 +95,7 @@ def load_isonetpp_benchmark(
     device: Optional[str] = None,
     download_root: Optional[str] = None,
 ):
-    # Map
+    # Map to class constants
     mode_map = {
         "train": TRAIN_MODE,
         "val": VAL_MODE,
@@ -115,21 +113,27 @@ def load_isonetpp_benchmark(
         local_root=download_root,
     )

-    # paths["query"]
-    # dataset_base_path
-    # dataset_base_path/
-
-
-
+    # paths["query"] = .../<folder>/splits/<mode_dir>/<file>
+    # We want dataset_base_path to be the **parent of <folder>** so that:
+    #   dataset_base_path / dataset_path_override / splits/<mode>/... exists
+    # Compute levels carefully:
+    #   file_dir   = .../<folder>/splits/<mode_dir>
+    #   splits_dir = .../<folder>/splits
+    #   folder_dir = .../<folder>
+    #   parent_dir = parent of <folder>
+    file_dir = os.path.dirname(paths["query"])
+    splits_dir = os.path.dirname(file_dir)
+    folder_dir = os.path.dirname(splits_dir)
+    parent_dir = os.path.dirname(folder_dir)  # <-- this is the correct dataset_base_path

     dataset_config = dict(
         mode=mode_norm,
-        dataset_name=_normalize_name(dataset_name, dataset_size),
+        dataset_name=_normalize_name(dataset_name, dataset_size),
         dataset_size=dataset_size,
         batch_size=batch_size,
         data_type=data_type,
-        dataset_base_path=
-        dataset_path_override=_folder_for_size(dataset_size),
+        dataset_base_path=parent_dir,  # parent of <folder>
+        dataset_path_override=_folder_for_size(dataset_size),  # "large_dataset"/"small_dataset"
         experiment=None,
         device=device,
     )
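
For readers following along, here is a minimal sketch of what the naming conventions in the hunks above imply for the repo layout. It only mirrors the behaviour described in the _normalize_name / _mode_prefix_and_dir docstrings; the concrete inputs ('aids', pairs '240k', mode 'test') are illustrative assumptions, not values taken from the commit.

# Illustrative sketch only -- simplified mirror of the helpers in the diff,
# with assumed example inputs ('aids', pairs '240k', mode 'test').
def normalize_name(base_name: str, pairs: str) -> str:
    # bare name -> append the pairs suffix; already-suffixed name -> keep as-is
    return base_name if base_name.endswith(("80k", "240k")) else base_name + pairs

base = normalize_name("aids", "240k")   # -> "aids240k" (suffix appears exactly once)
prefix = mode_dir = "test"              # any mode containing "test" maps to "test"

# Repo-relative files the loader expects, per the _ensure_paths docstring above:
expected = [
    f"large_dataset/splits/{mode_dir}/{prefix}_{base}_query_subgraphs.pkl",
    f"large_dataset/splits/{mode_dir}/{prefix}_{base}_rel_nx_is_subgraph_iso.pkl",
    f"large_dataset/corpus/{base}_corpus_subgraphs.pkl",
]
# e.g. "large_dataset/splits/test/test_aids240k_query_subgraphs.pkl"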
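
The core of the fix is where dataset_base_path points. Below is a small sketch of the os.path.dirname walk, assuming a hypothetical local path for the downloaded query pickle (the real location depends on the cache or download_root):

import os

# Hypothetical downloaded location of the query pickle (illustration only).
query_path = "/cache/isonetpp/large_dataset/splits/test/test_aids240k_query_subgraphs.pkl"

file_dir   = os.path.dirname(query_path)   # /cache/isonetpp/large_dataset/splits/test
splits_dir = os.path.dirname(file_dir)     # /cache/isonetpp/large_dataset/splits
folder_dir = os.path.dirname(splits_dir)   # /cache/isonetpp/large_dataset
parent_dir = os.path.dirname(folder_dir)   # /cache/isonetpp  <- dataset_base_path

# dataset_base_path / dataset_path_override / splits/<mode>/<file> resolves again:
rebuilt = os.path.join(parent_dir, "large_dataset", "splits", "test",
                       "test_aids240k_query_subgraphs.pkl")
assert rebuilt == query_path

This is also why the config passes dataset_path_override=_folder_for_size(dataset_size) separately from dataset_base_path: the base path stops at the parent of the size folder, and the override supplies the folder name itself.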