import argparse
import datetime
import importlib
import json
import os
import shutil
import sys
from pathlib import Path
from types import SimpleNamespace

import numpy as np
import torch
from torch.utils.data import DataLoader

from utils.logger import Logger, log_info
from utils.utils import set_seed, ddp_setup, destroy_process_group, get_data_paths
from dataset.data_util import TrajectoryDataset
from diffProModel.Diffusion import Diffusion
from diffProModel.protoTrans import TrajectoryTransformer
from train import train_main
from test import test_model

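# Entry point for the unified trajectory interpolation experiments. 'train'
# mode runs train_main() with periodic validation; 'test' mode rebuilds the
# diffusion and transformer models from a saved experiment directory and
# evaluates them on the test split.
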
def setup_experiment_environment(base_exp_dir, exp_name_with_timestamp, config_to_save, files_to_copy=None):
    """Sets up the experiment directory structure and saves essential files."""
    exp_dir = base_exp_dir / exp_name_with_timestamp
    results_dir = exp_dir / 'results'
    models_dir = exp_dir / 'models'
    logs_dir = exp_dir / 'logs'
    code_save_dir = exp_dir / 'code_snapshot'

    os.makedirs(results_dir, exist_ok=True)
    os.makedirs(models_dir, exist_ok=True)
    os.makedirs(logs_dir, exist_ok=True)
    os.makedirs(code_save_dir, exist_ok=True)

    # Persist the resolved configuration as indented JSON for reproducibility.
    with open(exp_dir / 'config_used.txt', 'w') as f:

        def ns_to_dict(ns):
            # Recursively convert SimpleNamespace trees into plain dicts.
            if isinstance(ns, SimpleNamespace):
                return {k: ns_to_dict(v) for k, v in ns.__dict__.items()}
            elif isinstance(ns, dict):
                return {k: ns_to_dict(v) for k, v in ns.items()}
            elif isinstance(ns, list):
                return [ns_to_dict(i) for i in ns]
            return ns

        config_dict = ns_to_dict(config_to_save)
        json.dump(config_dict, f, indent=4)

    # Snapshot the source files used for this run.
    if files_to_copy:
        for file_path_str in files_to_copy:
            try:
                file_path = Path(file_path_str)
                if file_path.exists():
                    shutil.copy(file_path, code_save_dir)
                else:
                    print(f"Warning: File to copy not found: {file_path_str}")
            except Exception as e:
                print(f"Warning: Could not copy file {file_path_str}: {e}")

    return exp_dir, models_dir, logs_dir, results_dir

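# CLI entry point: parse arguments, resolve the configuration, set up the
# experiment directory, then dispatch to training or testing.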
def main():
    parser = argparse.ArgumentParser(description='Unified Trajectory Interpolation - Training with Periodic Validation')
    parser.add_argument('--sampling_type', type=str, default='ddpm', choices=['ddpm', 'ddim'],
                        help='Diffusion sampling type (ddpm or ddim); affects periodic validation when DDIM is chosen, and experiment naming.')
    parser.add_argument('--config_module_path', type=str, default='conf.config',
                        help='Python module path for the base configuration (e.g., conf.config)')
    parser.add_argument('--exp_name', type=str, default='traj_interp_exp',
                        help='Base name for the experiment directory')
    parser.add_argument('--seed', type=int, default=42, help='Random seed')
    parser.add_argument('--device_id', type=int, default=0, help='CUDA device ID to use')
    parser.add_argument('--distributed', action='store_true', help='Enable distributed training (DDP)')

    parser.add_argument('--ddim_steps', type=int, default=50, help='Number of DDIM sampling steps for periodic validation')
    parser.add_argument('--ddim_eta', type=float, default=0.0,
                        help='DDIM stochasticity parameter for periodic validation (0=deterministic, 1=DDPM-like)')

    parser.add_argument('--debug', action='store_true', help='Enable debug mode for more detailed logs')

    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'],
                        help='Run mode: train or test')
    parser.add_argument('--model_path', type=str, default=None,
                        help='In test mode, path of the experiment directory whose models should be loaded')
    parser.add_argument('--model_epoch', type=int, default=None,
                        help='In test mode, epoch of the checkpoint to load')

    args = parser.parse_args()

    if args.distributed:
        ddp_setup(args.distributed)
        local_rank = int(os.environ.get('LOCAL_RANK', 0))
    else:
        local_rank = 0

    if not args.distributed or local_rank == 0:
        print(f"Running on device: cuda:{args.device_id}" if torch.cuda.is_available() else "Running on CPU")

    if torch.cuda.is_available():
        torch.cuda.set_device(args.device_id if not args.distributed else local_rank)

    # Offset the seed by rank so each DDP process gets a distinct random stream.
    set_seed(args.seed + local_rank)

    # Import the configuration module dynamically so --config_module_path
    # actually selects which module's load_config() is used.
    try:
        config_module = importlib.import_module(args.config_module_path)
        base_config_dict = config_module.load_config()
    except Exception as e:
        print(f"Error loading base configuration from {args.config_module_path}: {e}")
        sys.exit(1)

    # Wrap a plain dict config in SimpleNamespace objects for attribute access.
    if isinstance(base_config_dict, dict):
        cfg_ns = {k: SimpleNamespace(**v) if isinstance(v, dict) else v
                  for k, v in base_config_dict.items()}
        config = SimpleNamespace(**cfg_ns)
    else:
        # Assume load_config() already returned a namespace-like object.
        config = base_config_dict

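    # Command-line flags override the loaded config; optional fields missing
    # from it are filled with defaults so downstream code can rely on them.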
    config.debug = args.debug
    config.training.dis_gpu = args.distributed
    config.sampling.type = args.sampling_type
    config.sampling.ddim_steps = args.ddim_steps
    config.sampling.ddim_eta = args.ddim_eta
    config.device_id = args.device_id

    if not hasattr(config, 'model'): config.model = SimpleNamespace()
    if not hasattr(config.model, 'loss_type'): config.model.loss_type = 'l1'
    if not hasattr(config.training, 'learning_rate'): config.training.learning_rate = 2e-4
    if not hasattr(config.training, 'warmup_epochs'): config.training.warmup_epochs = 10
    if not hasattr(config.training, 'contrastive_margin'): config.training.contrastive_margin = 1.0
    if not hasattr(config.training, 'use_amp'): config.training.use_amp = True
    if not hasattr(config.training, 'kmeans_memory_size'): config.training.kmeans_memory_size = 10
    if not hasattr(config.training, 'ce_loss_weight'): config.training.ce_loss_weight = 0.1
    if not hasattr(config.training, 'diffusion_loss_weight'): config.training.diffusion_loss_weight = 1.0
    if not hasattr(config.training, 'contrastive_loss_weight'): config.training.contrastive_loss_weight = 1.0

    timestamp_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    exp_name_ts = f"{args.exp_name}_{config.data.dataset}_len{config.data.traj_length}_{args.sampling_type}_{timestamp_str}"

    # Only rank 0 creates the experiment directory; other ranks keep placeholders.
    exp_dir, models_save_dir, logs_dir, results_dir = Path("."), Path("."), Path("."), Path(".")
    if local_rank == 0:
        root_dir = Path(__file__).resolve().parent
        base_experiment_path = root_dir / "Experiments"

        files_to_copy_snapshot = [
            'main.py', 'train.py', 'test.py', 'conf/config.py',
            'diffProModel/Diffusion.py', 'diffProModel/protoTrans.py', 'diffProModel/loss.py',
            'utils/utils.py', 'utils/logger.py', 'utils/metric.py', 'dataset/data_util.py'
        ]
        exp_dir, models_save_dir, logs_dir, results_dir = setup_experiment_environment(
            base_experiment_path, exp_name_ts, config, files_to_copy_snapshot
        )

    logger = None
    if local_rank == 0:
        log_file_path = logs_dir / f"log_{timestamp_str}.txt"
        logger = Logger(
            name=exp_name_ts,
            log_path=log_file_path,
            colorize=True,
            level="debug" if args.debug else "info"
        )
        logger.info(f"Experiment directory: {exp_dir}")
        log_info(config, logger)
        logger.info(f"Using sampling type for periodic validation: {args.sampling_type}")
        if args.sampling_type == 'ddim':
            logger.info(f"DDIM Steps for validation: {args.ddim_steps}, Eta for validation: {args.ddim_eta}")

    # Wait for rank 0 to finish creating directories before proceeding.
    if args.distributed:
        torch.distributed.barrier()

    device = torch.device(f"cuda:{args.device_id}" if torch.cuda.is_available() else "cpu")

    if args.mode == 'train':
        if logger and local_rank == 0:
            logger.info("Starting training with periodic validation...")
        train_main(config, logger, exp_dir, timestamp_str)

    else:
        if logger and local_rank == 0:
            logger.info("Starting model testing...")
            logger.info(f"Loading model from: {args.model_path}")
            logger.info(f"Using epoch: {args.model_epoch}")

        if args.model_path is None or args.model_epoch is None:
            raise ValueError("Test mode requires --model_path and --model_epoch.")

        # Build the test split dataloader.
        test_dataset = TrajectoryDataset(
            file_paths=get_data_paths(config.data, for_train=False),
            traj_length=config.data.traj_length
        )
        test_dataloader = DataLoader(
            test_dataset,
            batch_size=config.sampling.batch_size,
            shuffle=False,
            num_workers=config.data.num_workers if isinstance(config.data.num_workers, int) else 4,
            pin_memory=True
        )

        # Instantiate the diffusion model and the trajectory transformer.
        diffusion_model = Diffusion(
            loss_type=config.model.loss_type,
            config=config,
            clip_denoised=True,
            predict_epsilon=True
        ).to(device)
        short_samples_model = TrajectoryTransformer(
            input_dim=config.trans.input_dim,
            embed_dim=config.trans.embed_dim,
            num_layers=config.trans.num_layers,
            num_heads=config.trans.num_heads,
            forward_dim=config.trans.forward_dim,
            seq_len=config.data.traj_length,
            n_cluster=config.trans.N_CLUSTER,
            dropout=config.trans.dropout
        ).to(device)

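        # Checkpoints are expected under <model_path>/models/<timestamp>/, with
        # per-epoch files named diffusion_model_epoch_<E>.pt,
        # transformer_epoch_<E>.pt and prototypes_transformer_epoch_<E>.npy;
        # the most recent timestamp directory is used.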
        models_base_dir = Path(args.model_path) / "models"
        timestamp_dirs = [d for d in models_base_dir.iterdir() if d.is_dir()]
        if not timestamp_dirs:
            raise FileNotFoundError(f"No model timestamp directories found in {models_base_dir}")

        # Use the most recent run (timestamp names sort lexicographically).
        latest_timestamp_dir = sorted(timestamp_dirs, key=lambda x: x.name)[-1]
        model_dir = latest_timestamp_dir

        diffusion_model_path = model_dir / f"diffusion_model_epoch_{args.model_epoch}.pt"
        transformer_model_path = model_dir / f"transformer_epoch_{args.model_epoch}.pt"
        prototypes_path = model_dir / f"prototypes_transformer_epoch_{args.model_epoch}.npy"

        if logger and local_rank == 0:
            logger.info(f"Loading diffusion model from: {diffusion_model_path}")
            logger.info(f"Loading transformer model from: {transformer_model_path}")
            logger.info(f"Loading prototypes from: {prototypes_path}")

        diffusion_model.load_state_dict(torch.load(diffusion_model_path, map_location=device))
        short_samples_model.load_state_dict(torch.load(transformer_model_path, map_location=device))
        prototypes = torch.from_numpy(np.load(prototypes_path)).float().to(device)

        # Switch to eval mode so dropout and similar layers are disabled.
        diffusion_model.eval()
        short_samples_model.eval()

        # Run evaluation without gradient tracking.
        with torch.no_grad():
            test_model(
                test_dataloader=test_dataloader,
                diffusion_model=diffusion_model,
                short_samples_model=short_samples_model,
                config=config,
                epoch=args.model_epoch,
                prototypes=prototypes,
                device=device,
                logger=logger,
                exp_dir=exp_dir
            )

    if args.distributed:
        if torch.distributed.is_initialized():
            destroy_process_group()

    if local_rank == 0 and logger:
        logger.info("Main script execution finished.")

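# Example invocations (paths and epoch are illustrative):
#   python main.py --mode train --sampling_type ddim --ddim_steps 50
#   python main.py --mode test --model_path Experiments/<exp_dir> --model_epoch 100
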
if __name__ == "__main__":
    main()