-
Notifications
You must be signed in to change notification settings - Fork 0
/
02_synthesize_data.py
executable file
·133 lines (114 loc) · 3.16 KB
/
02_synthesize_data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
#!/usr/bin/env python3
import numba
import numpy as np
import pandas as pd
# Load the station reference table, keeping only the columns the
# pipeline needs downstream: dock capacity plus the proxy location
# identifiers used for grouping and output.
station_columns = [
    'snapshot_id',
    'capacity',
    'proxy_id',
    'proxy_latitude',
    'proxy_longitude',
]
stations = pd.read_csv(
    'data/divvy_stations_2013-2017.csv',
    usecols=station_columns,
)
# Load the trip records with both endpoint timestamps parsed as
# datetimes. The former `infer_datetime_format=True` argument was
# removed: it is deprecated in pandas 2.x (format inference is now the
# strict default behavior of `parse_dates`), so passing it only emits a
# FutureWarning without changing the result.
trips = pd.read_csv(
    'data/divvy_trips_2013-2017.csv',
    parse_dates=['start_datetime', 'stop_datetime'],
    usecols=[
        'start_datetime',
        'stop_datetime',
        'start_station_snapshot_id',
        'stop_station_snapshot_id',
    ],
)
# Bucket every trip endpoint into the following full hour.
for column in ('start_datetime', 'stop_datetime'):
    trips[column] = trips[column].dt.ceil('h')
# Each trip changes the bike count at two stations — once as a
# departure and once as an arrival — so duplicate the trip table.
bikes = pd.concat((trips, trips))
# concat keeps the original index on both copies, so a duplicated index
# label marks a row belonging to the second (arrival) copy.
arrivals = bikes.index.duplicated()
# A departure removes a bike (-1), an arrival adds one (+1).
bikes['variation'] = np.where(arrivals, 1, -1)
# The event time is the stop time for arrivals, the start time for
# departures (mask replaces values where the condition is True).
bikes['datetime'] = bikes['start_datetime'].mask(
    arrivals,
    bikes['stop_datetime'],
)
# Likewise the event's station is the stop station for arrivals and the
# start station for departures.
bikes['snapshot_id'] = bikes['start_station_snapshot_id'].mask(
    arrivals,
    bikes['stop_station_snapshot_id'],
)
# Attach station metadata (inner join: events at stations missing from
# the reference table are dropped), discard the per-trip columns that
# are no longer needed, then order events by station and time.
bikes = bikes.merge(stations, on='snapshot_id')
bikes = bikes.drop(
    columns=[
        'start_datetime',
        'stop_datetime',
        'start_station_snapshot_id',
        'stop_station_snapshot_id',
        'snapshot_id',
    ],
)
bikes = bikes.sort_values(['proxy_id', 'datetime'])
# A station with zero docks cannot hold bikes: drop its events.
bikes = bikes[bikes['capacity'] > 0]
# Collapse events occurring at the same station in the same hour into a
# single net variation. The aggregation uses the string alias 'sum':
# passing the Python builtin `sum` is treated by pandas as a deprecated
# alias for the same operation and emits a FutureWarning.
bikes = bikes.groupby(
    ['proxy_id', 'proxy_latitude', 'proxy_longitude', 'capacity', 'datetime'],
    as_index = False
).agg({
    'variation': 'sum'
}).set_index(
    ['proxy_id', 'datetime']
)
# Infer the number of available bikes based on variation and capacity
@numba.njit
def bounded_cumulative_sum(array, min = np.nan, max = np.nan, start = 0):
    """Cumulative sum of `array`, clamped into [min, max] at each step.

    A NaN bound disables clamping on that side, because np.fmax/np.fmin
    ignore NaN operands. `start` seeds the running total (the first
    output is the clamped `start + array[0]`). Returns a float array
    the same size as `array`.

    NOTE(review): the `min`/`max` parameter names shadow builtins, but
    callers pass them as keywords, so renaming would break the API.
    """
    out = np.zeros(array.size)
    running = start
    for i, delta in enumerate(array):
        running = np.fmax(min, np.fmin(max, running + delta))
        out[i] = running
    return out
# For each station, run the clamped cumulative sum over its hourly
# variations: bounded below by 0 and above by the station's capacity,
# seeded at half capacity. apply() yields one array per station;
# explode() flattens them back to one scalar per row.
# NOTE(review): assigning via .values relies on the groupby output
# order matching the row order of `bikes`; this holds because the frame
# was sorted by ['proxy_id', 'datetime'] above — confirm if the sort or
# grouping ever changes.
bikes['available_bikes'] = bikes.groupby('proxy_id').apply(
    lambda group: bounded_cumulative_sum(
        group['variation'].to_numpy(),
        min = 0,
        max = group['capacity'].iloc[0],
        start = group['capacity'].iloc[0] // 2
    )
).explode().astype(int).values
# The raw per-hour variation is no longer needed.
bikes = bikes.drop('variation', axis = 1)
# Drop the first 200 events of every station: the half-capacity seed is
# arbitrary, so the running availability needs a burn-in period before
# its values can be trusted.
bikes = bikes.groupby('proxy_id').apply(
    lambda group: group.droplevel('proxy_id').iloc[200:]
)
# Reindex each station onto a complete hourly range and forward-fill
# gaps with the last known availability. The frequency alias is the
# lowercase '1h': uppercase 'H' is a deprecated alias in recent pandas,
# and lowercase matches the dt.ceil('h') calls used earlier.
bikes = bikes.groupby('proxy_id').apply(
    lambda group: group.reset_index(
        ['proxy_id'],
        drop = True
    ).reindex(
        pd.date_range(
            start = group.index.get_level_values('datetime').min(),
            end = group.index.get_level_values('datetime').max(),
            freq = '1h'
        ),
        method = 'pad'
    ).rename_axis(index = 'datetime')
)
# Persist the hourly availability table.
bikes.to_csv('data/divvy_bikes_2013-2017.csv')
# DataFrame.info() prints its summary itself and returns None, so
# wrapping it in print() would emit a stray "None" line after the
# summary — call it directly instead.
bikes.info()