import argparse

import pandas as pd
import torch
import torch.nn as nn

# NOTE: `load_checkpoint` is assumed to be exported by `tsa` alongside the other helpers;
# it is needed below when restoring a model for evaluation.
from tsa import TimeSeriesDataset, AutoEncForecast, train, evaluate, load_checkpoint
# Absolute import so the script can be run directly (a relative import fails outside a package).
from config_reconstruction import config

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
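# `config` (from config_reconstruction) is assumed to provide at least the keys read below:
# batch_size, output_size, label_col, input_att, temporal_att, seq_len, hidden_size_encoder,
# hidden_size_decoder, reg_factor1, reg_factor2, reg1, reg2, denoising, output_dir, index_col,
# categorical_cols, prediction_window, lr and device.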

def str2bool(value):
    """Parse a boolean command line flag passed as a string (e.g. "true"/"false")."""
    return str(value).lower() == "true"


def parse_args():
    """Build the command line interface; defaults fall back to the values in `config`."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch-size", default=config["batch_size"], type=int, help="batch size")
    parser.add_argument("--output-size", default=config["output_size"], type=int,
                        help="size of the output: defaults to 1 for forecasting")
    parser.add_argument("--label-col", default=config["label_col"], type=str, help="name of the target column")
    parser.add_argument("--input-att", default=config["input_att"], type=str2bool,
                        help="whether or not to activate the input attention mechanism")
    parser.add_argument("--temporal-att", default=config["temporal_att"], type=str2bool,
                        help="whether or not to activate the temporal attention mechanism")
    parser.add_argument("--seq-len", default=config["seq_len"], type=int, help="window length to use for forecasting")
    parser.add_argument("--hidden-size-encoder", default=config["hidden_size_encoder"], type=int,
                        help="size of the encoder's hidden states")
    parser.add_argument("--hidden-size-decoder", default=config["hidden_size_decoder"], type=int,
                        help="size of the decoder's hidden states")
    parser.add_argument("--reg-factor1", default=config["reg_factor1"], type=float,
                        help="contribution factor of the L1 regularization if using a sparse autoencoder")
    parser.add_argument("--reg-factor2", default=config["reg_factor2"], type=float,
                        help="contribution factor of the L2 regularization if using a sparse autoencoder")
    parser.add_argument("--reg1", default=config["reg1"], type=str2bool,
                        help="activate/deactivate L1 regularization")
    parser.add_argument("--reg2", default=config["reg2"], type=str2bool,
                        help="activate/deactivate L2 regularization")
    parser.add_argument("--denoising", default=config["denoising"], type=str2bool,
                        help="whether or not to use a denoising autoencoder")
    parser.add_argument("--do-train", default=True, type=str2bool,
                        help="whether or not to train the model")
    parser.add_argument("--do-eval", default=False, type=str2bool,
                        help="whether or not to evaluate the model")
    parser.add_argument("--output-dir", default=config["output_dir"], help="name of the folder to write output files to")
    parser.add_argument("--ckpt", default=None, help="checkpoint path for evaluation")
    return parser.parse_args()

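# Example invocations (a sketch only: the file name "train_reconstruction.py" is a placeholder
# and the flag values are illustrative, not the author's defaults):
#
#   python train_reconstruction.py --batch-size 64 --seq-len 24 --do-train true
#   python train_reconstruction.py --do-train false --do-eval true --ckpt output/checkpoint.pt
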
if __name__ == "__main__":
    # Command line arguments override the defaults coming from config_reconstruction.
    args = vars(parse_args())
    config.update(args)

    df = pd.read_csv("data/AirQualityUCI.csv", index_col=config["index_col"])

    # Build sliding-window datasets and the corresponding train/test data loaders.
    ts = TimeSeriesDataset(
        data=df,
        categorical_cols=config["categorical_cols"],
        target_col=config["label_col"],
        seq_length=config["seq_len"],
        prediction_window=config["prediction_window"]
    )
    train_iter, test_iter, nb_features = ts.get_loaders(batch_size=config["batch_size"])

    model = AutoEncForecast(config, input_size=nb_features).to(config["device"])
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])

    if config["do_eval"] and config["ckpt"]:
        # Restore the model (and optimizer state) from the checkpoint before evaluating.
        model, _, loss, epoch = load_checkpoint(config["ckpt"], model, optimizer, config["device"])
        evaluate(test_iter, loss, model, config)
    elif config["do_train"]:
        train(train_iter, test_iter, model, criterion, optimizer, config)