import pandas as pd
import h5py
import numpy as np
from tqdm import tqdm
import os


def create_h5_temporal_split(csv_path, train_h5_path, test_h5_path, train_ratio=0.8):
    """
    Temporal split strategy: each user's trajectory is split by time, with the first
    80% (train_ratio) used for training and the remaining 20% for testing. This
    evaluates the model's ability to predict a user's future trajectory while keeping
    every user's route patterns available during training.
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)

    all_user_ids = df['userid'].unique()
    print(f"Total users: {len(all_user_ids)}")
    print(f"Using temporal split: {train_ratio*100:.0f}% for training, {(1-train_ratio)*100:.0f}% for testing")

    # Create one HDF5 file for the training set and one for the test set
    train_sample_count = 0
    test_sample_count = 0

    with h5py.File(train_h5_path, 'w') as train_h5f, h5py.File(test_h5_path, 'w') as test_h5f:
        for user_id in tqdm(all_user_ids, desc="Processing users"):
            user_df = df[df['userid'] == user_id].sort_values('datetime')

            # Temporal split: the first train_ratio fraction goes to training, the rest to testing
            split_point = int(len(user_df) * train_ratio)
            train_user_df = user_df.iloc[:split_point]
            test_user_df = user_df.iloc[split_point:]

            # Write the training portion (if it contains any data points)
            if len(train_user_df) > 0:
                # Note: the dataset is named 'hours' but stores Unix timestamps in seconds
                timestamps = train_user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = train_user_df['lat'].values
                longitudes = train_user_df['lng'].values

                train_user_group = train_h5f.create_group(f"{user_id}_train")
                train_user_group.create_dataset('hours', data=timestamps, dtype='float64')
                train_user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                train_user_group.create_dataset('longitudes', data=longitudes, dtype='float64')
                train_sample_count += len(timestamps)

            # Write the test portion (if it contains any data points)
            if len(test_user_df) > 0:
                timestamps = test_user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = test_user_df['lat'].values
                longitudes = test_user_df['lng'].values

                test_user_group = test_h5f.create_group(f"{user_id}_test")
                test_user_group.create_dataset('hours', data=timestamps, dtype='float64')
                test_user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                test_user_group.create_dataset('longitudes', data=longitudes, dtype='float64')
                test_sample_count += len(timestamps)

    print("\nData processing complete!")
    print(f"Training samples: {train_sample_count}")
    print(f"Testing samples: {test_sample_count}")
    print(f"Train file saved to: {train_h5_path}")
    print(f"Test file saved to: {test_h5_path}")


def create_h5_mixed_split(csv_path, train_h5_path, test_h5_path, full_test_users=5, temporal_ratio=0.8):
    """
    Mixed split strategy:
    - A few users are held out entirely for the test set (tests cross-user generalization)
    - The remaining users are split by time (tests temporal generalization)
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)

    all_user_ids = df['userid'].unique()

    # Randomly pick a few users to hold out entirely for the test set
    np.random.seed(42)  # fixed seed for reproducibility
    full_test_user_ids = set(np.random.choice(all_user_ids, size=full_test_users, replace=False))
    temporal_split_user_ids = set(all_user_ids) - full_test_user_ids

    print(f"Total users: {len(all_user_ids)}")
    print(f"Users for temporal split: {len(temporal_split_user_ids)}")
    print(f"Users completely in test set: {len(full_test_user_ids)}")
    print(f"Full test users: {sorted(full_test_user_ids)}")

    train_sample_count = 0
    test_sample_count = 0

    with h5py.File(train_h5_path, 'w') as train_h5f, h5py.File(test_h5_path, 'w') as test_h5f:
        # Process the temporally split users
        for user_id in tqdm(temporal_split_user_ids, desc="Processing temporal split users"):
            user_df = df[df['userid'] == user_id].sort_values('datetime')
            split_point = int(len(user_df) * temporal_ratio)
            train_user_df = user_df.iloc[:split_point]
            test_user_df = user_df.iloc[split_point:]

            # Training portion
            if len(train_user_df) > 0:
                timestamps = train_user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = train_user_df['lat'].values
                longitudes = train_user_df['lng'].values

                train_user_group = train_h5f.create_group(f"{user_id}_temporal")
                train_user_group.create_dataset('hours', data=timestamps, dtype='float64')
                train_user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                train_user_group.create_dataset('longitudes', data=longitudes, dtype='float64')
                train_sample_count += len(timestamps)

            # Test portion
            if len(test_user_df) > 0:
                timestamps = test_user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = test_user_df['lat'].values
                longitudes = test_user_df['lng'].values

                test_user_group = test_h5f.create_group(f"{user_id}_temporal")
                test_user_group.create_dataset('hours', data=timestamps, dtype='float64')
                test_user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                test_user_group.create_dataset('longitudes', data=longitudes, dtype='float64')
                test_sample_count += len(timestamps)

        # Process the users held out entirely as test users
        for user_id in tqdm(full_test_user_ids, desc="Processing full test users"):
            user_df = df[df['userid'] == user_id].sort_values('datetime')

            timestamps = user_df['datetime'].apply(lambda x: x.timestamp()).values
            latitudes = user_df['lat'].values
            longitudes = user_df['lng'].values

            test_user_group = test_h5f.create_group(f"{user_id}_full")
            test_user_group.create_dataset('hours', data=timestamps, dtype='float64')
            test_user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
            test_user_group.create_dataset('longitudes', data=longitudes, dtype='float64')
            test_sample_count += len(timestamps)

    print("\nMixed split processing complete!")
    print(f"Training samples: {train_sample_count}")
    print(f"Testing samples: {test_sample_count}")
    print(f"Train file saved to: {train_h5_path}")
    print(f"Test file saved to: {test_h5_path}")


if __name__ == '__main__':
    # Configuration
    # Use the new trajectory data file directly
    CSV_DATA_PATH = 'data/matched_trajectory_data.csv'
    output_dir = 'data'

    print(f"Using input file: {CSV_DATA_PATH}")
    print("Using the temporal split strategy to generate train_temporal.h5 and test_temporal.h5")

    # Define output paths
    TRAIN_H5_PATH = os.path.join(output_dir, 'train_temporal.h5')
    TEST_H5_PATH = os.path.join(output_dir, 'test_temporal.h5')

    # Run the conversion
    create_h5_temporal_split(CSV_DATA_PATH, TRAIN_H5_PATH, TEST_H5_PATH)

    # Verify the generated files
    print("\nVerifying the generated HDF5 files...")
    try:
        with h5py.File(TRAIN_H5_PATH, 'r') as h5f:
            print(f"Training set contains {len(h5f.keys())} user groups")
            if h5f.keys():
                sample_key = list(h5f.keys())[0]
                sample_group = h5f[sample_key]
                print(f"Sample user group '{sample_key}':")
                for dset_name in sample_group.keys():
                    dset = sample_group[dset_name]
                    print(f"  - {dset_name}: {dset.shape}")
    except Exception as e:
        print(f"Error while verifying file: {e}")