import pandas as pd
import h5py
import numpy as np
from tqdm import tqdm
import os

def create_h5_from_csv(csv_path, train_h5_path, test_h5_path, test_split_ratio=0.1):
    """
    Reads trajectory data from a CSV file (expected columns: 'userid', 'datetime',
    'lat', 'lng'), splits users into train and test sets by user id, and saves each
    split into an HDF5 file structured for the TrajectoryDataset.

    Each HDF5 file contains one group per user, holding datasets for 'hours'
    (as Unix timestamps), 'latitudes', and 'longitudes'.
    """
    print(f"Loading data from {csv_path}...")
    try:
        df = pd.read_csv(csv_path, parse_dates=['datetime'])
    except Exception as e:
        print(f"Error reading or parsing CSV: {e}")
        return

    print("Sorting data by user and time...")
    df.sort_values(by=['userid', 'datetime'], inplace=True)

    all_user_ids = df['userid'].unique()
    test_user_count = int(len(all_user_ids) * test_split_ratio)
    test_user_ids = set(np.random.choice(all_user_ids, size=test_user_count, replace=False))
    train_user_ids = set(all_user_ids) - test_user_ids
    
    print(f"Total users: {len(all_user_ids)}")
    print(f"Training users: {len(all_user_ids) - test_user_count}")
    print(f"Test users: {test_user_count}")

    # Process for both train and test sets
    for h5_path, user_ids, set_name in [(train_h5_path, train_user_ids, "train"),
                                        (test_h5_path, test_user_ids, "test")]:
        
        if not user_ids:
            print(f"No users for {set_name} set, skipping.")
            continue

        print(f"\nCreating {set_name} HDF5 file at {h5_path}...")
        with h5py.File(h5_path, 'w') as h5f:
            # Group by userid
            grouped = df[df['userid'].isin(user_ids)].groupby('userid')
            
            for user_id, user_df in tqdm(grouped, desc=f"Processing {set_name} users"):
                # Ensure data is sorted by time for each user
                user_df = user_df.sort_values('datetime')
                
                # Convert datetime to unix timestamp for 'hours'
                # The original code used 'hours', but absolute time is more robust.
                timestamps = user_df['datetime'].apply(lambda x: x.timestamp()).values
                latitudes = user_df['lat'].values
                longitudes = user_df['lng'].values

                # Create a group for the user
                user_group = h5f.create_group(str(user_id))
                
                # Store data in the user's group
                user_group.create_dataset('hours', data=timestamps, dtype='float64')
                user_group.create_dataset('latitudes', data=latitudes, dtype='float64')
                user_group.create_dataset('longitudes', data=longitudes, dtype='float64')
        
        print(f"{set_name.capitalize()} data processing complete. File saved to {h5_path}")

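
# Illustrative sketch (not part of the original pipeline): one way a consumer such as
# the TrajectoryDataset mentioned in the docstring above might read a single user's
# trajectory back out of the HDF5 layout produced by create_h5_from_csv. The function
# name and return format here are assumptions made purely for demonstration.
def load_user_trajectory(h5_path, user_id):
    """Return (timestamps, latitudes, longitudes) arrays for one user group."""
    with h5py.File(h5_path, 'r') as h5f:
        user_group = h5f[str(user_id)]
        timestamps = user_group['hours'][:]    # Unix timestamps, float64 seconds
        latitudes = user_group['latitudes'][:]
        longitudes = user_group['longitudes'][:]
    return timestamps, latitudes, longitudes

# Example usage (assumes 'data/train.h5' exists and contains a user with id '12345'):
#   ts, lats, lngs = load_user_trajectory('data/train.h5', '12345')
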

if __name__ == '__main__':
    # Configuration
    CSV_DATA_PATH = 'data/May_trajectory_data.csv'
    
    # Define output paths inside the 'data' directory
    output_dir = 'data'
    os.makedirs(output_dir, exist_ok=True)
    TRAIN_H5_PATH = os.path.join(output_dir, 'train.h5')
    TEST_H5_PATH = os.path.join(output_dir, 'test.h5')

    # Run the conversion
    create_h5_from_csv(CSV_DATA_PATH, TRAIN_H5_PATH, TEST_H5_PATH)
    
    # Optional: Verify the created file structure for one user
    print("\nVerifying HDF5 file structure...")
    try:
        with h5py.File(TRAIN_H5_PATH, 'r') as h5f:
            user_keys = list(h5f.keys())
            if user_keys:
                sample_user_id = user_keys[0]
                print(f"Sample user '{sample_user_id}' in {TRAIN_H5_PATH}:")
                for dset in h5f[sample_user_id].keys():
                    print(f"  - Dataset: {dset}, Shape: {h5f[sample_user_id][dset].shape}")
            else:
                print("Train HDF5 file is empty.")
    except Exception as e:
        print(f"Could not verify HDF5 file: {e}")
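
    # Optional round-trip sketch (an illustrative addition, not part of the original
    # script): the stored Unix timestamps should convert back to pandas datetimes,
    # which is a quick way to spot-check the conversion done in create_h5_from_csv.
    try:
        with h5py.File(TRAIN_H5_PATH, 'r') as h5f:
            user_keys = list(h5f.keys())
            if user_keys:
                hours = h5f[user_keys[0]]['hours'][:5]
                print("First timestamps as datetimes:", pd.to_datetime(hours, unit='s'))
    except Exception as e:
        print(f"Could not run round-trip check: {e}")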