# File size: 8,618 bytes — commit 25b6d23
# NOTE: the original dump also contained a pasted line-number index here
# (extraction artifact); it is commented out so the file parses as Python.
import pandas as pd
import h5py
import numpy as np
from tqdm import tqdm
import os
def create_h5_temporal_split(csv_path, train_h5_path, test_h5_path, train_ratio=0.8):
    """Split each user's trajectory by time and write train/test HDF5 files.

    For every user, the chronologically first ``train_ratio`` fraction of the
    points goes to the training file and the remainder to the test file. This
    evaluates a model's ability to predict a known user's *future* trajectory
    while keeping every user's path patterns available during training.

    Args:
        csv_path: input CSV with 'userid', 'datetime', 'lat', 'lng' columns.
        train_h5_path: output path for the training HDF5 file.
        test_h5_path: output path for the test HDF5 file.
        train_ratio: fraction of each user's time-ordered points used for
            training; the rest go to the test set.

    Returns:
        None. Output is written to the two HDF5 files; on a CSV read/parse
        error a message is printed and the function returns early.
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)
    all_user_ids = df['userid'].unique()
    print(f"Total users: {len(all_user_ids)}")
    print(f"Using temporal split: {train_ratio*100:.0f}% for training, {(1-train_ratio)*100:.0f}% for testing")

    def _write_group(h5f, group_name, user_df):
        """Write one user's points as 'hours'/'latitudes'/'longitudes' datasets
        under ``group_name``; return the number of points written."""
        # NOTE(review): despite the name 'hours', the values are POSIX
        # timestamps in seconds — kept as-is for existing readers.
        timestamps = user_df['datetime'].apply(lambda x: x.timestamp()).values
        group = h5f.create_group(group_name)
        group.create_dataset('hours', data=timestamps, dtype='float64')
        group.create_dataset('latitudes', data=user_df['lat'].values, dtype='float64')
        group.create_dataset('longitudes', data=user_df['lng'].values, dtype='float64')
        return len(timestamps)

    train_sample_count = 0
    test_sample_count = 0
    with h5py.File(train_h5_path, 'w') as train_h5f, h5py.File(test_h5_path, 'w') as test_h5f:
        # A single groupby pass replaces the original per-user boolean mask
        # (df[df['userid'] == uid]), which rescanned the whole frame for every
        # user (O(n * users)). The frame is already sorted by
        # (userid, datetime), so each group is in chronological order and no
        # per-group re-sort is needed.
        for user_id, user_df in tqdm(df.groupby('userid', sort=True),
                                     desc="Processing users",
                                     total=len(all_user_ids)):
            # First train_ratio of the user's points -> train, rest -> test.
            split_point = int(len(user_df) * train_ratio)
            train_user_df = user_df.iloc[:split_point]
            test_user_df = user_df.iloc[split_point:]
            # Users with very few points may contribute to only one split.
            if len(train_user_df) > 0:
                train_sample_count += _write_group(train_h5f, f"{user_id}_train", train_user_df)
            if len(test_user_df) > 0:
                test_sample_count += _write_group(test_h5f, f"{user_id}_test", test_user_df)

    print(f"\nData processing complete!")
    print(f"Training samples: {train_sample_count}")
    print(f"Testing samples: {test_sample_count}")
    print(f"Train file saved to: {train_h5_path}")
    print(f"Test file saved to: {test_h5_path}")
def create_h5_mixed_split(csv_path, train_h5_path, test_h5_path, full_test_users=5, temporal_ratio=0.8, seed=42):
    """Mixed split strategy combining cross-user and temporal generalization.

    - ``full_test_users`` randomly chosen users go *entirely* to the test set
      (tests generalization to unseen users).
    - Every other user is split by time: the first ``temporal_ratio`` fraction
      of their points goes to training, the rest to testing.

    Args:
        csv_path: input CSV with 'userid', 'datetime', 'lat', 'lng' columns.
        train_h5_path: output path for the training HDF5 file.
        test_h5_path: output path for the test HDF5 file.
        full_test_users: number of users held out entirely for testing.
        temporal_ratio: train fraction for the temporally-split users.
        seed: RNG seed for the held-out user selection (fixed default keeps
            the split reproducible, as before).

    Returns:
        None. Output is written to the two HDF5 files; on a CSV read/parse
        error a message is printed and the function returns early.

    Raises:
        ValueError: if ``full_test_users`` exceeds the number of users.
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)
    all_user_ids = df['userid'].unique()
    if full_test_users > len(all_user_ids):
        # np.random.choice would raise anyway; fail with a clearer message.
        raise ValueError(
            f"full_test_users={full_test_users} exceeds the number of users ({len(all_user_ids)})"
        )

    # Randomly pick the fully-held-out users; fixed seed keeps it reproducible.
    np.random.seed(seed)
    full_test_user_ids = set(np.random.choice(all_user_ids, size=full_test_users, replace=False))
    temporal_split_user_ids = set(all_user_ids) - full_test_user_ids
    print(f"Total users: {len(all_user_ids)}")
    print(f"Users for temporal split: {len(temporal_split_user_ids)}")
    print(f"Users completely in test set: {len(full_test_user_ids)}")
    print(f"Full test users: {sorted(full_test_user_ids)}")

    def _write_group(h5f, group_name, user_df):
        """Write one user's points as 'hours'/'latitudes'/'longitudes' datasets
        under ``group_name``; return the number of points written."""
        # NOTE(review): 'hours' actually holds POSIX timestamps in seconds —
        # kept as-is for existing readers.
        timestamps = user_df['datetime'].apply(lambda x: x.timestamp()).values
        group = h5f.create_group(group_name)
        group.create_dataset('hours', data=timestamps, dtype='float64')
        group.create_dataset('latitudes', data=user_df['lat'].values, dtype='float64')
        group.create_dataset('longitudes', data=user_df['lng'].values, dtype='float64')
        return len(timestamps)

    train_sample_count = 0
    test_sample_count = 0
    with h5py.File(train_h5_path, 'w') as train_h5f, h5py.File(test_h5_path, 'w') as test_h5f:
        # One ordered groupby pass replaces (a) the per-user boolean-mask
        # rescan of the whole frame and (b) iteration over a Python set,
        # which made HDF5 group creation order nondeterministic. The frame is
        # already sorted by (userid, datetime), so groups are chronological.
        for user_id, user_df in tqdm(df.groupby('userid', sort=True),
                                     desc="Processing users",
                                     total=len(all_user_ids)):
            if user_id in full_test_user_ids:
                # Entire trajectory of a held-out user goes to the test set.
                test_sample_count += _write_group(test_h5f, f"{user_id}_full", user_df)
                continue
            # Temporal split for the remaining users.
            split_point = int(len(user_df) * temporal_ratio)
            train_user_df = user_df.iloc[:split_point]
            test_user_df = user_df.iloc[split_point:]
            if len(train_user_df) > 0:
                train_sample_count += _write_group(train_h5f, f"{user_id}_temporal", train_user_df)
            if len(test_user_df) > 0:
                test_sample_count += _write_group(test_h5f, f"{user_id}_temporal", test_user_df)

    print(f"\nMixed split processing complete!")
    print(f"Training samples: {train_sample_count}")
    print(f"Testing samples: {test_sample_count}")
    print(f"Train file saved to: {train_h5_path}")
    print(f"Test file saved to: {test_h5_path}")
if __name__ == '__main__':
    # Configuration: input trajectory CSV and output directory.
    CSV_DATA_PATH = 'data/matched_trajectory_data.csv'
    output_dir = 'data'
    print(f"将使用输入文件: {CSV_DATA_PATH}")
    print("将使用时间分割策略生成 train_temporal.h5 和 test_temporal.h5")

    # Output file locations for the temporal split.
    TRAIN_H5_PATH = os.path.join(output_dir, 'train_temporal.h5')
    TEST_H5_PATH = os.path.join(output_dir, 'test_temporal.h5')

    # Run the temporal-split conversion with the default train ratio.
    create_h5_temporal_split(CSV_DATA_PATH, TRAIN_H5_PATH, TEST_H5_PATH)

    # Sanity-check the generated training file: count user groups and dump
    # the dataset shapes of the first group.
    print("\n验证生成的HDF5文件...")
    try:
        with h5py.File(TRAIN_H5_PATH, 'r') as h5f:
            print(f"训练集包含 {len(h5f.keys())} 个用户组")
            if len(h5f) > 0:
                first_key = next(iter(h5f.keys()))
                first_group = h5f[first_key]
                print(f"示例用户组 '{first_key}':")
                for name, dset in first_group.items():
                    print(f" - {name}: {dset.shape}")
    except Exception as e:
        print(f"验证文件时出错: {e}")