"""
Time Series Data Preprocessor
----------------------------
Handles loading, preprocessing, and transformation of time series data from various sources.
Includes support for CSV, Excel, JSON, and pandas DataFrame inputs.
Features:
- Automatic data type detection and parsing
- Missing value handling
- Feature scaling and normalization
- Sliding window creation
- Time-based feature engineering
- Memory-efficient data loading for large datasets
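
Example (illustrative; "data.csv" is a hypothetical file containing the
listed columns):
    >>> pre = TimeSeriesPreprocessor(
    ...     time_column="timestamp",
    ...     feature_columns=["temperature", "humidity"],
    ...     sequence_length=24,
    ... )
    >>> features, timestamps, targets = pre.preprocess("data.csv")  # doctest: +SKIP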
"""

import json
from pathlib import Path
from typing import List, Optional, Tuple, Union

import numpy as np
import pandas as pd
import pyarrow.parquet as pq
import torch
from dateutil.parser import parse
from loguru import logger
from sklearn.preprocessing import MinMaxScaler, StandardScaler


class TimeSeriesPreprocessor:
    """Preprocesses time series data for the efficient transformer model."""

    def __init__(
        self,
        time_column: str,
        feature_columns: List[str],
        target_columns: Optional[List[str]] = None,
        sequence_length: int = 100,
        stride: int = 1,
        batch_size: int = 32,
        scaling_method: str = 'standard',
        fill_method: str = 'forward',
        device: str = "cuda" if torch.cuda.is_available() else "cpu"
    ):
        """
        Initialize the preprocessor.

        Args:
            time_column: Name of the timestamp column
            feature_columns: List of feature column names
            target_columns: List of target column names (if different from features)
            sequence_length: Length of sequences to generate
            stride: Stride for the sliding window
            batch_size: Batch size for data loading
            scaling_method: 'standard' or 'minmax'
            fill_method: Missing-value strategy, 'forward' (ffill) or 'backward' (bfill)
            device: Device to store tensors on
        """
        self.time_column = time_column
        self.feature_columns = feature_columns
        self.target_columns = target_columns or feature_columns
        self.sequence_length = sequence_length
        self.stride = stride
        self.batch_size = batch_size
        self.scaling_method = scaling_method
        self.fill_method = fill_method
        self.device = device

        # One scaler per feature so each column is scaled independently
        self.scalers = {}
        for col in self.feature_columns:
            self.scalers[col] = (
                StandardScaler() if scaling_method == 'standard' else MinMaxScaler()
            )

        logger.info(f"Initialized TimeSeriesPreprocessor with {len(feature_columns)} features")

    def _validate_file(self, file_path: Union[str, Path]) -> Path:
        """Validate file existence and format."""
        file_path = Path(file_path)
        if not file_path.exists():
            raise FileNotFoundError(f"File not found: {file_path}")

        supported_extensions = {'.csv', '.xlsx', '.parquet', '.json'}
        if file_path.suffix not in supported_extensions:
            raise ValueError(f"Unsupported file format. Supported formats: {supported_extensions}")

        return file_path

    def _parse_time(self, time_series: pd.Series) -> pd.Series:
        """Parse the time column to datetime."""
        try:
            if time_series.dtype == 'object':
                # Fall back to dateutil for heterogeneous string formats
                return pd.to_datetime(time_series.apply(parse))
            return pd.to_datetime(time_series)
        except Exception as e:
            logger.error(f"Error parsing time column: {str(e)}")
            raise

    def load_data(
        self,
        source: Union[str, Path, pd.DataFrame],
        chunk_size: Optional[int] = None
    ) -> pd.DataFrame:
        """
        Load data from various sources.

        Args:
            source: File path or DataFrame
            chunk_size: Chunk size for reading large CSV files

        Returns:
            Loaded DataFrame
        """
        try:
            if isinstance(source, pd.DataFrame):
                # Copy so preprocessing never mutates the caller's DataFrame
                df = source.copy()
            else:
                file_path = self._validate_file(source)
                if file_path.suffix == '.parquet':
                    df = pq.read_table(file_path).to_pandas()
                elif file_path.suffix == '.csv':
                    if chunk_size:
                        # Memory-efficient loading: parse the CSV in chunks
                        # so peak memory is bounded by the parse buffer
                        chunks = pd.read_csv(file_path, chunksize=chunk_size)
                        df = pd.concat(chunks, ignore_index=True)
                    else:
                        df = pd.read_csv(file_path)
                elif file_path.suffix == '.xlsx':
                    # pd.read_excel has no chunksize parameter, so Excel
                    # files are always loaded in full
                    df = pd.read_excel(file_path)
                else:  # JSON
                    with open(file_path) as f:
                        df = pd.DataFrame(json.load(f))

            # Validate columns
            missing_cols = (
                set(self.feature_columns + [self.time_column]) - set(df.columns)
            )
            if missing_cols:
                raise ValueError(f"Missing columns in data: {missing_cols}")

            return df

        except Exception as e:
            logger.error(f"Error loading data: {str(e)}")
            raise

    def create_sequences(
        self,
        data: pd.DataFrame
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Create sequences for training using sliding windows.

        Args:
            data: Preprocessed DataFrame

        Returns:
            Tuple of (features, timestamps, targets)
        """
        if len(data) < self.sequence_length:
            raise ValueError(
                f"Need at least {self.sequence_length} rows, got {len(data)}"
            )

        sequences = []
        timestamps = []
        targets = []

        # Convert timestamps to integer indices
        time_indices = pd.factorize(data[self.time_column])[0]

        # Create sliding windows; the target is the last step of each window
        for i in range(0, len(data) - self.sequence_length + 1, self.stride):
            sequence = data[self.feature_columns].iloc[i:i + self.sequence_length].values
            target = data[self.target_columns].iloc[i + self.sequence_length - 1].values
            time_idx = time_indices[i:i + self.sequence_length]

            sequences.append(sequence)
            timestamps.append(time_idx)
            targets.append(target)

        # Stack with NumPy first: building tensors from a list of arrays is slow
        return (
            torch.from_numpy(np.stack(sequences)).float().to(self.device),
            torch.from_numpy(np.stack(timestamps)).long().to(self.device),
            torch.from_numpy(np.stack(targets)).float().to(self.device)
        )
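
    # Note on the method above: N rows yield
    # floor((N - sequence_length) / stride) + 1 windows, e.g. 1000 rows with
    # sequence_length=24 and stride=1 give 977 windows.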

    def preprocess(
        self,
        source: Union[str, Path, pd.DataFrame],
        fit_scalers: bool = True
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Main preprocessing pipeline.

        Args:
            source: Data source
            fit_scalers: Whether to fit the scalers or just transform with them

        Returns:
            Preprocessed sequences ready for the model
        """
        logger.info("Starting preprocessing pipeline")
        try:
            # Load data
            df = self.load_data(source)

            # Parse time column and sort chronologically
            df[self.time_column] = self._parse_time(df[self.time_column])
            df = df.sort_values(self.time_column)

            # Handle missing values
            if self.fill_method in ('forward', 'ffill'):
                df[self.feature_columns] = df[self.feature_columns].ffill()
            elif self.fill_method in ('backward', 'bfill'):
                df[self.feature_columns] = df[self.feature_columns].bfill()
            else:
                raise ValueError(f"Unknown fill_method: {self.fill_method}")

            # Scale each feature column with its own scaler
            for col in self.feature_columns:
                if fit_scalers:
                    df[col] = self.scalers[col].fit_transform(df[[col]]).ravel()
                else:
                    df[col] = self.scalers[col].transform(df[[col]]).ravel()

            # Create sequences
            sequences = self.create_sequences(df)

            logger.info(
                f"Preprocessing complete. Created {len(sequences[0])} sequences"
            )
            return sequences

        except Exception as e:
            logger.error(f"Error in preprocessing pipeline: {str(e)}")
            raise

    def create_dataloader(
        self,
        sequences: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
        shuffle: bool = True
    ) -> torch.utils.data.DataLoader:
        """Create a DataLoader from sequences.

        Shuffling reorders whole windows; the temporal order inside each
        sequence is preserved.
        """
        dataset = torch.utils.data.TensorDataset(*sequences)
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=shuffle
        )

    def inverse_transform(
        self,
        scaled_data: torch.Tensor,
        columns: List[str]
    ) -> torch.Tensor:
        """
        Inverse transform scaled data back to the original scale.

        Args:
            scaled_data: Scaled tensor
            columns: Column names corresponding to the tensor's last dimension

        Returns:
            Tensor in the original scale
        """
        # Detach and copy to NumPy; the copy avoids mutating a CPU tensor
        # that would otherwise share memory with the array
        data_np = scaled_data.detach().cpu().numpy().copy()

        # Inverse transform each column with its fitted scaler
        for i, col in enumerate(columns):
            if col in self.scalers:
                data_np[..., i] = self.scalers[col].inverse_transform(
                    data_np[..., i].reshape(-1, 1)
                ).ravel()

        return torch.from_numpy(data_np).float().to(self.device)


# Example usage
if __name__ == "__main__":
    # Example configuration
    config = {
        "time_column": "timestamp",
        "feature_columns": ["temperature", "humidity", "pressure"],
        "sequence_length": 24,  # sequences of 24 time steps
        "stride": 1,
        "batch_size": 32
    }

    try:
        # Initialize preprocessor
        preprocessor = TimeSeriesPreprocessor(**config)

        # Example with a CSV file
        sequences = preprocessor.preprocess("sensor_data.csv")

        # Create DataLoader
        dataloader = preprocessor.create_dataloader(sequences)
        logger.info("Successfully created DataLoader from CSV data")

        # Example with a DataFrame
        df = pd.DataFrame({
            "timestamp": pd.date_range(start="2024-01-01", periods=1000, freq="h"),
            "temperature": np.random.normal(25, 5, 1000),
            "humidity": np.random.normal(60, 10, 1000),
            "pressure": np.random.normal(1013, 5, 1000)
        })
        sequences = preprocessor.preprocess(df)
        logger.info("Successfully preprocessed DataFrame")

    except Exception as e:
        logger.error(f"Error in example usage: {str(e)}")
        raise