import pandas as pd
import h5py
import numpy as np
from tqdm import tqdm
import os


def create_h5_from_csv(csv_path, train_h5_path, test_h5_path, test_split_ratio=0.1):
    """
    Reads trajectory data from a CSV file, processes it, and saves it into
    HDF5 files structured for the TrajectoryDataset.

    The HDF5 file will have a group for each user, containing datasets for
    'hours' (as Unix timestamps), 'latitudes', and 'longitudes'.
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)

    # Split users into train/test sets; the remaining users form the train set.
    all_user_ids = df['userid'].unique()
    test_user_count = int(len(all_user_ids) * test_split_ratio)
    test_user_ids = set(np.random.choice(all_user_ids, size=test_user_count, replace=False))
    train_user_ids = set(all_user_ids) - test_user_ids

    print(f"Total users: {len(all_user_ids)}")
    print(f"Training users: {len(train_user_ids)}")
    print(f"Test users: {test_user_count}")

    # Process both the train and test sets
    for h5_path, user_ids, set_name in [(train_h5_path, train_user_ids, "train"),
                                        (test_h5_path, test_user_ids, "test")]:
        if not user_ids:
            print(f"No users for {set_name} set, skipping.")
            continue

        print(f"\nCreating {set_name} HDF5 file at {h5_path}...")
        with h5py.File(h5_path, 'w') as h5f:
            # Group rows by userid so each user's trajectory is written together
            grouped = df[df['userid'].isin(user_ids)].groupby('userid')
            for user_id, user_df in tqdm(grouped, desc=f"Processing {set_name} users"):
                # Ensure data is sorted by time for each user
                user_df = user_df.sort_values('datetime')

                # Convert datetime to Unix timestamps for 'hours'.
                # The original code used 'hours', but absolute time is more robust.
                timestamps = user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = user_df['lat'].values
                longitudes = user_df['lng'].values

                # Create a group for the user
                user_group = h5f.create_group(str(user_id))

                # Store data in the user's group
                user_group.create_dataset('hours', data=timestamps, dtype='float64')
                user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                user_group.create_dataset('longitudes', data=longitudes, dtype='float64')

        print(f"{set_name.capitalize()} data processing complete. File saved to {h5_path}")


if __name__ == '__main__':
    # Configuration
    CSV_DATA_PATH = 'data/May_trajectory_data.csv'

    # Define output paths inside the 'data' directory
    output_dir = 'data'
    os.makedirs(output_dir, exist_ok=True)
    TRAIN_H5_PATH = os.path.join(output_dir, 'train.h5')
    TEST_H5_PATH = os.path.join(output_dir, 'test.h5')

    # Run the conversion
    create_h5_from_csv(CSV_DATA_PATH, TRAIN_H5_PATH, TEST_H5_PATH)

    # Optional: Verify the created file structure for one user
    print("\nVerifying HDF5 file structure...")
    try:
        with h5py.File(TRAIN_H5_PATH, 'r') as h5f:
            if list(h5f.keys()):
                sample_user_id = list(h5f.keys())[0]
                print(f"Sample user '{sample_user_id}' in {TRAIN_H5_PATH}:")
                for dset in h5f[sample_user_id].keys():
                    print(f"  - Dataset: {dset}, Shape: {h5f[sample_user_id][dset].shape}")
            else:
                print("Train HDF5 file is empty.")
    except Exception as e:
        print(f"Could not verify HDF5 file: {e}")
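
# --- Usage sketch (assumption, not part of the conversion pipeline) ---
# The docstring above describes the HDF5 layout: one group per user with
# 'hours', 'latitudes', and 'longitudes' datasets. The helper below is a
# minimal sketch of how a downstream reader might load one user's trajectory
# back into a DataFrame; the function name and column names returned here are
# illustrative assumptions, not an existing API of TrajectoryDataset.
def load_user_trajectory(h5_path, user_id):
    """Read one user's datasets from an HDF5 file written by create_h5_from_csv."""
    with h5py.File(h5_path, 'r') as h5f:
        grp = h5f[str(user_id)]
        return pd.DataFrame({
            'timestamp': grp['hours'][:],      # Unix timestamps (float64)
            'lat': grp['latitudes'][:],
            'lng': grp['longitudes'][:],
        })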