Preprocess data for different datasets.
This script standardizes column names, filters data, and calculates text metrics.
It is designed to be modular, allowing for easy addition of new datasets and processing steps.
main()
Main function to execute dataset processing.
Configures the data settings and initiates the data processing.
Source code in src/data/preprocessing/preprocess_data.py
(lines 63–91)
def main() -> int:
    """
    Entry point for dataset preprocessing.

    Parses the ``--dataset`` CLI option (a comma-separated list of dataset
    names; empty means every member of ``DataSets``), then runs each dataset
    through its processor and saves the result. Datasets with no registered
    data args are skipped with a warning, as are those whose input files are
    missing. Always returns 0.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--dataset', type=str, default='')
    requested = arg_parser.parse_args().dataset
    # An explicit --dataset value selects specific datasets; otherwise run all.
    targets = requested.split(',') if requested else list(DataSets)
    for name in targets:
        logger.info(f'Processing {name}...')
        data_args = get_data_args(name)
        if not data_args:
            logger.warning(
                f'No data args found for {name}. Skipping...',
            )
            continue
        try:
            processor = get_processor(data_args)
            result = processor.process()
            processor.save_processed_data(processed_data=result)
            logger.info(f'Finished processing {name}')
        except FileNotFoundError as e:
            # Missing source files are non-fatal: warn and move on.
            logger.warning(f'FileNotFoundError processing {name}: {e}')
    return 0