import os

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

from dataset.data_util import MinMaxScaler
from utils.metric import *
from utils.utils import (mask_data_general, ddp_setup, continuous_mask_data,
                         continuous_time_based_mask, mask_multiple_segments)

def test_model(test_dataloader, diffusion_model, short_samples_model, config, epoch,
               prototypes, device, logger, exp_dir):
    """
    Test the unified diffusion model (DDPM or DDIM) on the test dataset.

    Args:
        test_dataloader: DataLoader for the test data.
        diffusion_model: The unified diffusion model (instance of diffProModel.Diffusion.Diffusion).
        short_samples_model: Trajectory transformer model used for feature extraction.
        config: Configuration object.
        epoch: Current epoch number (or identifier for the test run).
        prototypes: Prototype vectors (e.g., from TrajectoryTransformer or K-Means).
        device: Device to run the model on (already determined by the caller).
        logger: Logger object.
        exp_dir: Experiment directory path (a pathlib.Path).
    """
    distributed = config.training.dis_gpu
    local_rank = 0
    if distributed:
        try:
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        except ValueError:
            if logger:
                logger.warning("LOCAL_RANK environment variable is not a valid integer. Defaulting to 0.")
            local_rank = 0

    # Distance thresholds for the trajectory-coverage (TC@k) metric.
    thresholds = list(range(1000, 11000, 1000))

    mtd_list, mppe_list, maepp_list, maeps_list, aptc_list, avg_aptc_list, max_td_list = [], [], [], [], [], [], []

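    # Sampling configuration: DDPM is the default; the DDIM step count and eta
    # are read here and forwarded to diffusion_model.sample(), which presumably
    # ignores them when sampling_type is 'ddpm' (an assumption based on the call
    # signature below, not verified against the Diffusion class).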
    sampling_type = getattr(config.sampling, 'type', 'ddpm')
    ddim_steps = getattr(config.sampling, 'ddim_steps', 50)
    ddim_eta = getattr(config.sampling, 'ddim_eta', 0.0)
    debug_mode = getattr(config, 'debug', False)

    if logger and local_rank == 0:
        logger.info(f"Testing with sampling_type: {sampling_type} for epoch {epoch}")
        if sampling_type == 'ddim':
            logger.info(f"DDIM steps: {ddim_steps}, DDIM eta: {ddim_eta}")

    diffusion_model.eval()
    short_samples_model.eval()

    pbar_desc = f"Epoch {epoch} Test Progress ({sampling_type.upper()})"
    for batch_idx, (abs_time, lat, lng) in enumerate(tqdm(test_dataloader, desc=pbar_desc, disable=(local_rank != 0))):
        if debug_mode and logger and local_rank == 0:
            logger.info(f"Batch {batch_idx} - Input shapes: abs_time {abs_time.shape}, lat {lat.shape}, lng {lng.shape}")
            logger.info(f"Input data stats - abs_time: min={abs_time.min().item():.4f}, max={abs_time.max().item():.4f}, "
                        f"lat: min={lat.min().item():.4f}, max={lat.max().item():.4f}, "
                        f"lng: min={lng.min().item():.4f}, max={lng.max().item():.4f}")

        if torch.isnan(abs_time).any() or torch.isnan(lat).any() or torch.isnan(lng).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected in input data!")
            continue

        # Stack the three channels into (batch, seq_len, 3): absolute time, latitude, longitude.
        testx_raw = torch.stack([abs_time, lat, lng], dim=-1).to(device)

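        # Note: the scaler is constructed and fitted per batch; the global
        # normalization parameters loaded from the JSON file are presumably
        # reconciled with the batch statistics inside MinMaxScaler.fit
        # (an assumption - this depends on the project's MinMaxScaler implementation).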
        scaler = MinMaxScaler(global_params_file='./data/robust_normalization_params.json')
        scaler.fit(testx_raw)
        testx_scaled = scaler.transform(testx_raw)

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Scaler min: {scaler.min_val.flatten().cpu().numpy()}, max: {scaler.max_val.flatten().cpu().numpy()}")

        if torch.isnan(testx_scaled).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected after scaling!")
                if torch.any(scaler.max_val == scaler.min_val):
                    logger.error("Possible division by zero in scaler: max_val equals min_val for some features.")
            continue

        # Permute to (batch, channels, seq_len), the layout the masking helpers expect.
        testx_scaled_permuted = testx_scaled.permute(0, 2, 1)

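        # Build the masked conditioning signal. Four strategies are supported:
        # 'general' (random point masking), 'continuous' (one contiguous masked
        # span controlled by mask_ratio), 'time_based' (points_to_mask per time
        # window), and 'multi_segment' (several masked segments). These readings
        # are inferred from the helper names and arguments, not from their code.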
        if config.masking_strategy == 'general':
            masked_condition_permuted = mask_data_general(testx_scaled_permuted)
        elif config.masking_strategy == 'continuous':
            masked_condition_permuted = continuous_mask_data(testx_scaled_permuted, config.mask_ratio)
        elif config.masking_strategy == 'time_based':
            masked_condition_permuted = continuous_time_based_mask(testx_scaled_permuted, points_to_mask=config.mask_points_per_hour)
        elif config.masking_strategy == 'multi_segment':
            masked_condition_permuted = mask_multiple_segments(testx_scaled_permuted, points_per_segment=config.mask_segments)
        else:
            raise ValueError(f"Unknown masking strategy: {config.masking_strategy}")

        # Permute back to (batch, seq_len, channels) for the transformer.
        masked_condition = masked_condition_permuted.permute(0, 2, 1)

        with torch.no_grad():
            _, query_features = short_samples_model(masked_condition)

        if torch.isnan(query_features).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected in query_features!")
            continue
        if torch.isnan(prototypes).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected in provided prototypes!")
            continue

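        # Soft prototype matching: cosine similarity between each query feature
        # and every prototype, scaled by sqrt(d_k) and softmaxed (attention-style),
        # then used as weights to blend the prototype bank into one conditioning
        # vector per sample.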
        cos_sim = F.cosine_similarity(query_features.unsqueeze(1), prototypes.unsqueeze(0), dim=-1)
        if torch.isnan(cos_sim).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected in cos_sim!")
            continue

        d_k = query_features.size(-1)
        scaled_cos_sim = F.softmax(cos_sim / np.sqrt(d_k), dim=-1)
        matched_prototypes_for_diffusion = torch.matmul(scaled_cos_sim, prototypes).to(device)

        if torch.isnan(matched_prototypes_for_diffusion).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected in matched_prototypes!")
            continue

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Sampling with type: {sampling_type}, DDIM steps: {ddim_steps}, eta: {ddim_eta}")
            logger.info(f"Input to diffusion model (testx_scaled_permuted) shape: {testx_scaled_permuted.shape}, "
                        f"masked condition (masked_condition_permuted) shape: {masked_condition_permuted.shape}, "
                        f"matched prototypes shape: {matched_prototypes_for_diffusion.shape}")

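        # Run the reverse diffusion process. test_x0 supplies the target shape
        # (and possibly the known values for inpainting-style sampling), while
        # the masked condition and matched prototypes act as conditioning; how
        # test_x0 is used internally is an assumption about the sample() API.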
        try:
            pred_x0_scaled = diffusion_model.sample(
                test_x0=testx_scaled_permuted,
                attr=masked_condition_permuted,
                prototype=matched_prototypes_for_diffusion,
                sampling_type=sampling_type,
                ddim_num_steps=ddim_steps,
                ddim_eta=ddim_eta,
            )

            if torch.isnan(pred_x0_scaled).any():
                if logger and local_rank == 0:
                    logger.error(f"Batch {batch_idx} - NaN detected in Diffusion model output!")
                continue
        except Exception as e:
            if logger and local_rank == 0:
                logger.error(f"Exception during Diffusion model sampling: {e}")
                import traceback
                logger.error(traceback.format_exc())
            continue

        # Back to (batch, seq_len, channels) before inverse scaling.
        pred_x0_scaled_unpermuted = pred_x0_scaled.permute(0, 2, 1)

        if debug_mode and logger and local_rank == 0:
            logger.info(f"pred_x0_scaled_unpermuted stats before inverse_transform: min={pred_x0_scaled_unpermuted.min().item():.4f}, max={pred_x0_scaled_unpermuted.max().item():.4f}")

        # The sampler can overshoot the [0, 1] range; clamp before inverse scaling.
        if (pred_x0_scaled_unpermuted < 0).any() or (pred_x0_scaled_unpermuted > 1).any():
            if logger and local_rank == 0:
                logger.warning(f"Batch {batch_idx} - Values outside [0,1] in pred_x0_scaled: min={pred_x0_scaled_unpermuted.min().item():.4f}, max={pred_x0_scaled_unpermuted.max().item():.4f}. Clamping.")
            pred_x0_scaled_unpermuted = torch.clamp(pred_x0_scaled_unpermuted, 0, 1)

        pred_x0_final = scaler.inverse_transform(pred_x0_scaled_unpermuted)
        ground_truth_final = testx_raw.cpu()

        if torch.isnan(pred_x0_final).any() or torch.isnan(ground_truth_final).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected after inverse transform!")
            continue

        pred_x0_np = pred_x0_final.cpu().numpy()
        ground_truth_np = ground_truth_final.numpy()

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Shapes for metrics: pred_x0_np {pred_x0_np.shape}, ground_truth_np {ground_truth_np.shape}")
            logger.info(f"pred_x0_np stats: min={np.min(pred_x0_np):.4f}, max={np.max(pred_x0_np):.4f}")
            logger.info(f"ground_truth_np stats: min={np.min(ground_truth_np):.4f}, max={np.max(ground_truth_np):.4f}")

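        # Per-batch metrics: MTD and MaxTD (mean/max trajectory deviation),
        # MPPE (mean point-to-point error), MAEPP/MAEPS (mean absolute time
        # error per point / per sample, computed on channel 0), and TC@k
        # (trajectory coverage at each distance threshold).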
        try:
            mtd_list.append(mean_trajectory_deviation(pred_x0_np, ground_truth_np))
            mppe_list.append(mean_point_to_point_error(pred_x0_np, ground_truth_np))
            maepp_list.append(mean_absolute_error_per_point(pred_x0_np[:, :, 0], ground_truth_np[:, :, 0]))
            maeps_list.append(mean_absolute_error_per_sample(pred_x0_np[:, :, 0], ground_truth_np[:, :, 0]))
            aptc_result, avg_aptc_result = trajectory_coverage(pred_x0_np, ground_truth_np, thresholds)
            aptc_list.append(aptc_result)
            avg_aptc_list.append(avg_aptc_result)
            max_td_list.append(max_trajectory_deviation(pred_x0_np, ground_truth_np))
        except Exception as e:
            if logger and local_rank == 0:
                logger.error(f"Exception during metric calculation in batch {batch_idx}: {e}")
                if debug_mode:
                    import traceback
                    logger.error(traceback.format_exc())
            continue

        if debug_mode and batch_idx == 0 and os.environ.get('PROJECT_DEBUG_MODE', '0') == '1':
            if logger and local_rank == 0:
                logger.info("Project debug mode: breaking after first test batch")
            break

    if local_rank == 0:
        mean_mtd = np.mean(mtd_list) if mtd_list else float('nan')
        mean_mppe = np.mean(mppe_list) if mppe_list else float('nan')
        mean_maepp = np.mean(maepp_list) if maepp_list else float('nan')
        mean_maeps = np.mean(maeps_list) if maeps_list else float('nan')
        mean_avg_aptc = np.mean(avg_aptc_list) if avg_aptc_list else float('nan')
        # MaxTD reports the worst case, so aggregate with max rather than mean.
        mean_max_td = np.max(max_td_list) if max_td_list else float('nan')
        if aptc_list:
            mean_aptc_thresholds = {k: np.mean([d[k] for d in aptc_list if k in d]) for k in aptc_list[0]}
        else:
            mean_aptc_thresholds = {f'TC@{thr}': float('nan') for thr in thresholds}

        if logger:
            logger.info(f"--- Test Results for Epoch {epoch} ({sampling_type.upper()}) ---")
            logger.info(f"Mean MTD: {mean_mtd:.4f}")
            logger.info(f"Mean MPPE: {mean_mppe:.4f}")
            logger.info(f"Mean MAEPP (time): {mean_maepp:.4f}")
            logger.info(f"Mean MAEPS (time): {mean_maeps:.4f}")
            logger.info(f"Mean AVG_TC: {mean_avg_aptc:.4f}")
            logger.info(f"Overall MaxTD: {mean_max_td:.4f}")
            for threshold_val, tc_val in mean_aptc_thresholds.items():
                logger.info(f"Mean {threshold_val}: {tc_val:.4f}")
            if sampling_type == 'ddim':
                logger.info(f"DDIM sampling with {ddim_steps} steps, eta: {ddim_eta:.2f}")
            else:
                logger.info(f"DDPM sampling with {config.diffusion.num_diffusion_timesteps} steps")

        results_dir = exp_dir / 'results'
        os.makedirs(results_dir, exist_ok=True)
        sampling_prefix = f"{sampling_type.upper()}_"

        def save_metric_npy(metric_name, value, current_epoch):
            """Append {epoch: value} to the metric's .npy history, skipping NaNs."""
            file_path = results_dir / f"{sampling_prefix}Test_mean_{metric_name}.npy"
            if np.isnan(value):
                return
            if os.path.exists(file_path):
                try:
                    existing_data = np.load(file_path, allow_pickle=True).item()
                except Exception:
                    existing_data = {}
                existing_data[current_epoch] = value
            else:
                existing_data = {current_epoch: value}
            np.save(file_path, existing_data)

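        # Each file stores a plain {epoch: value} dict, so a metric's history can
        # be read back with np.load(path, allow_pickle=True).item().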
        save_metric_npy('mtd', mean_mtd, epoch)
        save_metric_npy('mppe', mean_mppe, epoch)
        save_metric_npy('maepp', mean_maepp, epoch)
        save_metric_npy('maeps', mean_maeps, epoch)
        save_metric_npy('avg_aptc', mean_avg_aptc, epoch)
        save_metric_npy('max_td', mean_max_td, epoch)
        for threshold_key, tc_value in mean_aptc_thresholds.items():
            metric_key_name = threshold_key.replace('@', '_at_')  # e.g. 'TC@1000' -> 'tc_TC_at_1000'
            save_metric_npy(f"tc_{metric_key_name}", tc_value, epoch)

        if logger:
            logger.info(f"Saved test metrics to {results_dir}")

    if torch.distributed.is_initialized():
        torch.distributed.barrier()

    if local_rank == 0:
        return {"mean_mtd": mean_mtd, "mean_mppe": mean_mppe}
    return {}
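
# A minimal invocation sketch (illustrative only: the loader/model objects and
# config fields shown are assumptions about the surrounding project, not APIs
# defined in this module):
#
#   from pathlib import Path
#   import logging
#
#   logger = logging.getLogger("test")
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   results = test_model(test_dataloader, diffusion_model, short_samples_model,
#                        config, epoch=0, prototypes=prototypes, device=device,
#                        logger=logger, exp_dir=Path("./experiments/run_0"))
#   # On rank 0, results == {"mean_mtd": ..., "mean_mppe": ...}; other ranks get {}.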