"""🧑💻 command line interface for NiaAML"""

from pathlib import Path
from typing import Optional

from loguru import logger
import pandas as pd
import typer
from typing_extensions import Annotated

from niaaml import PipelineOptimizer, Pipeline
from niaaml.data.csv_data_reader import CSVDataReader


app = typer.Typer(
    help="🌳 a command line interface for NiaAML.",
    no_args_is_help=True
)


@app.command()
def optimize(
    data_csv_file: Path,
    has_header: bool = True,
    ignore_columns: list[int] = [],
    classifiers: list[str] = [
        'AdaBoost',
        'Bagging',
        'MultiLayerPerceptron',
        'RandomForest',
        'ExtremelyRandomizedTrees',
        'LinearSVC',
    ],
    feature_selection_algorithms: list[str] = [
        'SelectKBest',
        'SelectPercentile',
        'ParticleSwarmOptimization',
        'VarianceThreshold',
    ],
    feature_transform_algorithms: list[str] = ['Normalizer', 'StandardScaler'],
    categorical_features_encoder: Annotated[Optional[str], typer.Option()] = "OneHotEncoder",
    imputer: Annotated[Optional[str], typer.Option()] = None,
    fitness_name: str = 'Accuracy',
    pipeline_population_size: int = 15,
    inner_population_size: int = 15,
    number_of_pipeline_evaluations: int = 100,
    number_of_inner_evaluations: int = 100,
    optimization_algorithm: str = 'ParticleSwarmAlgorithm',
    inner_optimization_algorithm: Annotated[Optional[str], typer.Option()] = None,
    result_file: Path = Path("pipeline.ppln"),
) -> None:
    """🦾 optimizes a NiaAML pipeline on a given dataset."""
    # 📄 load and setup data
    logger.info(f"📄 reading `{data_csv_file}`")
    data_reader = CSVDataReader(
        src=str(data_csv_file),
        has_header=has_header,
        contains_classes=True,
        ignore_columns=ignore_columns
    )

    # 🦾 setup pipeline
    logger.info("🦾 start the optimization process ...")
    pipeline_optimizer = PipelineOptimizer(
        data=data_reader,
        classifiers=classifiers,
        feature_selection_algorithms=feature_selection_algorithms,
        feature_transform_algorithms=feature_transform_algorithms,
        categorical_features_encoder=categorical_features_encoder,
        imputer=imputer,
    )

    # 📈 optimize pipeline
    pipeline = pipeline_optimizer.run(
        fitness_name,
        pipeline_population_size,
        inner_population_size,
        number_of_pipeline_evaluations,
        number_of_inner_evaluations,
        optimization_algorithm,
        inner_optimization_algorithm,
    )

    # 💾 save pipeline
    logger.success(f"💾 saving optimized pipeline to `{result_file}`")
    pipeline.export(result_file)
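
# A sketch of how the command above might be invoked (assumes this module is exposed
# as a `niaaml` console script; if it is only installed as niaaml/cli.py,
# `python -m niaaml.cli optimize ...` should behave the same):
#
#   niaaml optimize data.csv --classifiers AdaBoost --classifiers RandomForest \
#       --fitness-name Accuracy --result-file pipeline.ppln
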
@app.command()
def infer(
    data_csv_file: Path,
    has_header: bool = True,
    ignore_columns: list[int] = [],
    pipeline_file: Path = Path("pipeline.ppln"),
    predictions_csv_file: Path = Path("preds.csv"),
) -> None:
    """🔮 use an optimized NiaAML pipeline to make predictions."""
    # 💾 load pipeline
    pipeline = Pipeline.load(pipeline_file)

    # 📄 load and setup data
    logger.info(f"📄 reading `{data_csv_file}`")
    reader = CSVDataReader(
        src=str(data_csv_file),
        has_header=has_header,
        contains_classes=True,
        ignore_columns=ignore_columns
    )
    reader._read_data()  # make sure the CSV has been parsed before the features are accessed
    x: pd.DataFrame = reader.get_x()

    # 🔮 make predictions
    logger.info(f"🔮 using `{pipeline_file}` to make predictions on the data")
    x['preds'] = pipeline.run(x)

    # 💾 save predictions
    logger.success(f"💾 saving predictions to `{predictions_csv_file}`")
    x.to_csv(predictions_csv_file)
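
# A corresponding sketch for the command above (same `niaaml` entry-point assumption
# as the optimize example):
#
#   niaaml infer data.csv --pipeline-file pipeline.ppln --predictions-csv-file preds.csv
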

def main():
    """🚪 typer entry point for the CLI."""
    app()

if __name__ == "__main__":
    main()