diff --git a/.github/workflows/test-mlperf-inference-mixtral.yml b/.github/workflows/test-mlperf-inference-mixtral.yml
index e48cbb1e9..eb47f853f 100644
--- a/.github/workflows/test-mlperf-inference-mixtral.yml
+++ b/.github/workflows/test-mlperf-inference-mixtral.yml
@@ -1,11 +1,8 @@
-# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
 name: MLPerf inference MIXTRAL-8x7B

 on:
   schedule:
-    - cron: "59 19 * * *" # 30th minute and 20th hour => 20:30 UTC => 2 AM IST
+    - cron: "59 23 * * */5" # 59th minute, 23rd hour => 23:59 UTC on days-of-week 0 and 5 (Sun, Fri)

 jobs:
   build_reference:
diff --git a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
index e4d780780..c1f58e20a 100644
--- a/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
+++ b/.github/workflows/test-nvidia-mlperf-inference-implementations.yml
@@ -43,6 +43,11 @@ jobs:
            gpu_name=rtx_4090
            docker_string=" --docker"
          fi
+         if [ "${{ matrix.model }}" = "bert-99" ] || [ "${{ matrix.model }}" = "bert-99.9" ]; then
+           category="edge"
+         else
+           category="datacenter,edge"
+         fi
          if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
          python3 -m venv gh_action
@@ -51,6 +56,6 @@ jobs:
          pip install --upgrade mlcflow
          mlc pull repo mlcommons@mlperf-automations --branch=dev
-         mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --pull_changes=yes --pull_inference_changes=yes --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=datacenter,edge --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet
+         mlcr --tags=run-mlperf,inference,_all-scenarios,_submission,_full,_r5.0-dev --preprocess_submission=yes --pull_changes=yes --pull_inference_changes=yes --execution_mode=valid --gpu_name=$gpu_name --model=${{ matrix.model }} --submitter="MLCommons" --hw_name=$hw_name --implementation=nvidia --backend=tensorrt --category=$category --division=closed --docker_dt --docker_mlc_repo=mlcommons@mlperf-automations --docker_mlc_repo_branch=dev --adr.compiler.tags=gcc --device=cuda --use_model_from_host=yes --use_dataset_from_host=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean $docker_string --quiet
          mlcr --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/mlcommons/mlperf_inference_unofficial_submissions_v5.0 --repo_branch=auto-update --commit_message="Results from GH action on NVIDIA_$hw_name" --quiet --submission_dir=$HOME/gh_action_submissions --hw_name=$hw_name
diff --git a/docs/cm-yaml-guide.md b/docs/cm-yaml-guide.md
deleted file mode 100644
index 2b0b1242b..000000000
--- a/docs/cm-yaml-guide.md
+++ /dev/null
@@ -1,46 +0,0 @@
-This README provides a walkthrough of the `_cm.yaml` file.
-
-## Keys and Datatypes followed
-
-1. **alias**: `string`
-2. **uid**: `string`
-3. **automation_alias**: `string`
-4. 
**automation_uid**: `string` -5. **category**: `string` -6. **developers**: `list of strings` -7. **tags**: `list of strings` -8. **default_env**: `dictionary` - Contains key-value pairs where values are `strings` -9. **env**: `dictionary` - Contains key-value pairs where values are `strings` -10. **input_mapping**: `dictionary` - Contains key-value pairs where values are `strings` -11. **env_key_mapping**: `dictionary` - Contains key-value pairs where values are `strings` -12. **new_env_keys**: `list of strings` -13. **new_state_keys**: `list of strings` -14. **deps**: `list of dictionaries` - Each dictionary can contain `tags` or other nested keys -15. **names**: `list of strings` -16. **enable_if_env**: `dictionary` - Contains key-value pairs where values are lists of `strings` -17. **skip_if_env**: `dictionary` - Contains key-value pairs where values are lists of `strings` -18. **prehook_deps**: `list of dictionaries` - Each dictionary may contain `names` and `tags` as lists -19. **posthook_deps**: `list of dictionaries` - Each dictionary may contain `tags` and other keys -20. **variation_groups_order**: `list of strings` -21. **variations**: `dictionary` - Each variation is a dictionary containing keys like `alias`, `default_variations`, `group`, etc. -22. **group**: `string` -23. **add_deps_recursive**: `dictionary` - Contains nested `tags` and other keys -24. **default_variations**: `dictionary` - Contains key-value pairs where values are `strings` -25. **docker**: `dictionary` - Contains keys specific to Docker configurations: - - **base_image**: `string` - - **image_name**: `string` - - **os**: `string` - - **os_version**: `string` - - **deps**: `list of dictionaries` - Each dictionary can include `tags` or other keys. - - **env**: `dictionary` - Contains key-value pairs where values are `strings` - - **interactive**: `boolean` - - **extra_run_args**: `string` - - **mounts**: `list of strings` - Specifies mount paths in the format `"source:destination"` - - **pre_run_cmds**: `list of strings` - Commands to run before the container starts - - **docker_input_mapping**: `dictionary` - Contains key-value pairs where values are strings, mapping input parameters to Docker environment variables - - **use_host_user_id**: `boolean` - - **use_host_group_id**: `boolean` - - **skip_run_cmd**: `string` - - **shm_size**: `string` - - **real_run**: `boolean` - - **all_gpus**: `string` diff --git a/docs/getting-started.md b/docs/getting-started.md index baed31eea..2bf8ff5b2 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -1,30 +1,71 @@ +# Getting Started with MLC Script Automation -# Getting Started with CM Script Automation +## Running MLC Scripts -## Running CM Scripts - -To execute a simple script in CM that captures OS details, use the following command: +To execute a simple script in MLC that captures OS details, use the following command: ```bash -cm run script --tags=detect,os -j +mlcr detect,os -j ``` +* Here, `mlcr` is a shortform for `mlc run script --tags=` This command gathers details about the system on which it's run, such as: ```json -{ - "CM_HOST_OS_TYPE": "linux", - "CM_HOST_OS_BITS": "64", - "CM_HOST_OS_FLAVOR": "ubuntu", - "CM_HOST_OS_FLAVOR_LIKE": "debian", - "CM_HOST_OS_VERSION": "24.04", - "CM_HOST_OS_KERNEL_VERSION": "6.8.0-45-generic", - "CM_HOST_OS_GLIBC_VERSION": "2.39", - "CM_HOST_OS_MACHINE": "x86_64", - "CM_HOST_OS_PACKAGE_MANAGER": "apt", - "CM_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD": "DEBIAN_FRONTEND=noninteractive apt-get install -y", - 
"CM_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD": "apt-get update -y", - "+CM_HOST_OS_DEFAULT_LIBRARY_PATH": [ +$ mlcr detect,os -j +[2025-02-03 04:57:23,449 main.py:694 INFO] - Repos path for Index: /home/arjun/MLC/repos +[2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for script saved to /home/arjun/MLC/repos/index_script.json. +[2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for cache saved to /home/arjun/MLC/repos/index_cache.json. +[2025-02-03 04:57:24,167 main.py:837 INFO] - Shared index for experiment saved to /home/arjun/MLC/repos/index_experiment.json. +[2025-02-03 04:57:24,210 module.py:574 INFO] - * mlcr detect,os +[2025-02-03 04:57:24,213 module.py:5354 INFO] - ! cd /mnt/arjun/MLC/repos/gateoverflow@mlperf-automations +[2025-02-03 04:57:24,213 module.py:5355 INFO] - ! call /home/arjun/MLC/repos/gateoverflow@mlperf-automations/script/detect-os/run.sh from tmp-run.sh +[2025-02-03 04:57:24,245 module.py:5501 INFO] - ! call "postprocess" from /home/arjun/MLC/repos/gateoverflow@mlperf-automations/script/detect-os/customize.py +[2025-02-03 04:57:24,254 module.py:2195 INFO] - { + "return": 0, + "env": { + "MLC_HOST_OS_TYPE": "linux", + "MLC_HOST_OS_BITS": "64", + "MLC_HOST_OS_FLAVOR": "ubuntu", + "MLC_HOST_OS_FLAVOR_LIKE": "debian", + "MLC_HOST_OS_VERSION": "24.04", + "MLC_HOST_OS_KERNEL_VERSION": "6.8.0-52-generic", + "MLC_HOST_OS_GLIBC_VERSION": "2.39", + "MLC_HOST_OS_MACHINE": "x86_64", + "MLC_HOST_OS_PACKAGE_MANAGER": "apt", + "MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD": "DEBIAN_FRONTEND=noninteractive apt-get install -y", + "MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD": "apt-get update -y", + "+MLC_HOST_OS_DEFAULT_LIBRARY_PATH": [ + "/usr/local/lib/x86_64-linux-gnu", + "/lib/x86_64-linux-gnu", + "/usr/lib/x86_64-linux-gnu", + "/usr/lib/x86_64-linux-gnu64", + "/usr/local/lib64", + "/lib64", + "/usr/lib64", + "/usr/local/lib", + "/lib", + "/usr/lib", + "/usr/x86_64-linux-gnu/lib64", + "/usr/x86_64-linux-gnu/lib" + ], + "MLC_HOST_PLATFORM_FLAVOR": "x86_64", + "MLC_HOST_PYTHON_BITS": "64", + "MLC_HOST_SYSTEM_NAME": "arjun-spr" + }, + "new_env": { + "MLC_HOST_OS_TYPE": "linux", + "MLC_HOST_OS_BITS": "64", + "MLC_HOST_OS_FLAVOR": "ubuntu", + "MLC_HOST_OS_FLAVOR_LIKE": "debian", + "MLC_HOST_OS_VERSION": "24.04", + "MLC_HOST_OS_KERNEL_VERSION": "6.8.0-52-generic", + "MLC_HOST_OS_GLIBC_VERSION": "2.39", + "MLC_HOST_OS_MACHINE": "x86_64", + "MLC_HOST_OS_PACKAGE_MANAGER": "apt", + "MLC_HOST_OS_PACKAGE_MANAGER_INSTALL_CMD": "DEBIAN_FRONTEND=noninteractive apt-get install -y", + "MLC_HOST_OS_PACKAGE_MANAGER_UPDATE_CMD": "apt-get update -y", + "+MLC_HOST_OS_DEFAULT_LIBRARY_PATH": [ "/usr/local/lib/x86_64-linux-gnu", "/lib/x86_64-linux-gnu", "/usr/lib/x86_64-linux-gnu", @@ -38,98 +79,24 @@ This command gathers details about the system on which it's run, such as: "/usr/x86_64-linux-gnu/lib64", "/usr/x86_64-linux-gnu/lib" ], - "CM_HOST_PLATFORM_FLAVOR": "x86_64", - "CM_HOST_PYTHON_BITS": "64", - "CM_HOST_SYSTEM_NAME": "intel-spr-i9" + "MLC_HOST_PLATFORM_FLAVOR": "x86_64", + "MLC_HOST_PYTHON_BITS": "64", + "MLC_HOST_SYSTEM_NAME": "arjun-spr" + }, + "state": { + "os_uname_machine": "x86_64", + "os_uname_all": "Linux arjun-spr 6.8.0-52-generic #53-Ubuntu SMP PREEMPT_DYNAMIC Sat Jan 11 00:06:25 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux" + }, + "new_state": { + "os_uname_machine": "x86_64", + "os_uname_all": "Linux arjun-spr 6.8.0-52-generic #53-Ubuntu SMP PREEMPT_DYNAMIC Sat Jan 11 00:06:25 UTC 2025 x86_64 x86_64 x86_64 GNU/Linux" + }, + "deps": [] } ``` -For more details on CM scripts, see the [CM 
documentation](index.md). - -### Adding New CM Scripts +For more details on MLC scripts, see the [MLC documentation](index.md). -CM aims to provide lightweight connectors between existing automation scripts and tools without substituting them. You can add your own scripts to CM with the following command, which creates a script named `hello-world`: - -```bash -cm add script hello-world --tags=hello-world,display,test -``` - -This command initializes a CM script in the local repository with the following structure: - -``` -└── CM - ├── index.json - ├── repos - │ ├── local - │ │ ├── cfg - │ │ ├── cache - │ │ ├── cmr.yaml - │ │ └── script - │ │ └── hello-world - │ │ ├── _cm.yaml - │ │ ├── customize.py - │ │ ├── README-extra.md - │ │ ├── run.bat - │ │ └── run.sh - │ └── mlcommons@cm4mlops - └── repos.json -``` You can also execute the script from Python as follows: -```python -import cmind -output = cmind.access({'action':'run', 'automation':'script', 'tags':'hello-world,display,test'}) -if output['return'] == 0: - print(output) -``` - -If you discover that your new script is similar to an existing script in any CM repository, you can clone an existing script using the following command: - -```bash -cm copy script .: -``` - -Here, `` is the name of the existing script, and `` is the name of the new script you're creating. Existing script names in the `cm4mlops` repository can be found [here](https://github.com/mlcommons/cm4mlops/tree/mlperf-inference/script). - -## Caching and Reusing CM Script Outputs - -By default, CM scripts run in the current directory and record all new files there. For example, a universal download script might download an image to the current directory: - -```bash -cm run script --tags=download,file,_wget --url=https://cKnowledge.org/ai/data/computer_mouse.jpg --verify=no --env.CM_DOWNLOAD_CHECKSUM=45ae5c940233892c2f860efdf0b66e7e -``` - -To cache and reuse the output of scripts, CM offers a `cache` automation feature similar to `script`. When `"cache":true` is specified in a script's metadata, CM will create a `cache` directory in `$HOME/CM/repos/local` with a unique ID and the same tags as `script`, and execute the script there. - -Subsequent executions of the same script will reuse files from the cache, avoiding redundancy. This is especially useful for large files or data sets. - -You can manage cache entries and find specific ones using commands like: - -```bash -cm show cache -cm show cache --tags=get,ml-model,resnet50,_onnx -cm find cache --tags=download,file,ml-model,resnet50,_onnx -cm info cache --tags=download,file,ml-model,resnet50,_onnx -``` - -To clean cache entries: - -```bash -cm rm cache --tags=ml-model,resnet50 -cm rm cache -f # Clean all entries -``` - -You can completely reset the CM framework by removing the `$HOME/CM` directory, which deletes all downloaded repositories and cached entries. - -## Integration with Containers - -CM scripts are designed to run natively or inside containers with the same commands. You can substitute `cm run script` with `cm docker script` to execute a script inside an automatically-generated container: - -```bash -cm docker script --tags=python,app,image-classification,onnx,_cpu -``` - -CM automatically handles the generation of Dockerfiles, building of containers, and execution within containers, providing a seamless experience whether running scripts natively or in containers. 
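+One portable sketch (not from the original docs) is to shell out to the `mlcr` CLI using only the Python standard library; a native MLCflow Python API call, if available, could replace this:
+
+```python
+import subprocess
+
+# Run the same "detect,os" script from Python by invoking the mlcr CLI;
+# -j prints the resulting env/state as JSON to stdout.
+result = subprocess.run(["mlcr", "detect,os", "-j"],
+                        capture_output=True, text=True, check=True)
+print(result.stdout)
+```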
-
-This approach simplifies the development process by eliminating the need for separate Dockerfile maintenance and allows for the use of native scripts and workflows directly within containers.
diff --git a/docs/index.md b/docs/index.md
index 9a74cd2b3..30de604f4 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,29 +1,40 @@
-# CM "script" automation specification
+# MLC "script" automation specification

-Please check the [CM documentation](https://docs.mlcommons.org/ck) for more details about the CM automation language.
+Please check the [MLC documentation](https://docs.mlcommons.org/mlcflow) for more details about the MLCflow interface.

-See the [automatically generated catalog](scripts/index.md) of all CM scripts from MLCommons.
+See the [automatically generated catalog](scripts/index.md) of all the MLC scripts.

-## Understanding CM scripts
+## Understanding MLC scripts

-* A CM script is identified by a set of tags and by unique ID.
-* Further each CM script can have multiple variations and they are identified by variation tags which are treated in the same way as tags and identified by a `_` prefix.
+* An MLC script is identified by a set of tags and by a unique ID.
+* Further, each MLC script can have multiple variations; they are identified by variation tags, which are treated in the same way as tags but distinguished by a `_` prefix.

-### CM script execution flow
-
+### MLC script execution flow
+```mermaid
+graph TD
+  MLC -->|env = incoming env + env_from_meta| B[Script]
+  B -->|env - local_env_keys| C[List of Dependencies]
+  C --> D[Preprocess]
+  D -->|env - local_env_keys| E[Prehook dependencies]
+  E --> F[Run script]
+  F -->|env - clean_env_keys_post_deps| G[Posthook dependencies]
+  G --> H[Postprocess]
+  H -->|env - clean_env_keys_post_deps| I[Post dependencies]
+  I -->|"env(new_env_keys)"| J[Script return]
+```

-* When a CM script is invoked (either by tags or by unique ID), its `_cm.json` is processed first which will check for any `deps` script and if there are, then they are executed in order.
+* When an MLC script is invoked (either by tags or by unique ID), its `meta.yaml` is processed first; it is checked for any `deps` scripts, and if there are any, they are executed in order.
 * Once all the `deps` scripts are executed, `customize.py` file is checked and if existing `preprocess` function inside it is executed if present.
-* Then any `prehook_deps` CM scripts mentioned in `_cm.json` are executed similar to `deps`
+* Then any `prehook_deps` scripts mentioned in `meta.yaml` are executed similarly to `deps`
 * After this, keys in `env` dictionary is exported as `ENV` variables and `run` file if exists is executed.
-* Once run file execution is done, any `posthook_deps` CM scripts mentioned in `_cm.json` are executed similar to `deps`
+* Once run file execution is done, any `posthook_deps` scripts mentioned in `meta.yaml` are executed similarly to `deps`
 * Then `postprocess` function inside customize.py is executed if present.
-* After this stage any `post_deps` CM scripts mentioned in `_cm.json` is executed.
+* After this stage any `post_deps` scripts mentioned in `meta.yaml` are executed.

 ** If a script is already cached, then the `preprocess`, `run file` and `postprocess` executions won't happen and only the dependencies marked as `dynamic` will be executed from `deps`, `prehook_deps`, `posthook_deps` and `postdeps`.
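+
+As an illustration of this flow, a minimal `meta.yaml` sketch is shown below (the script name, dependency tags and env keys are hypothetical; the keys themselves are the documented ones):
+
+```yaml
+alias: my-example-script          # hypothetical script name
+uid: 0123456789abcdef             # hypothetical unique ID
+tags:
+- get
+- myscript
+deps:                             # run first, before preprocess() in customize.py
+- tags: detect,os
+prehook_deps:                     # run after preprocess(), before the run script
+- tags: get,python3
+posthook_deps:                    # run after the run script, before postprocess()
+- tags: get,generic-python-lib,_package.numpy
+  skip_if_env:                    # conditional execution, as described below
+    MLC_SKIP_NUMPY:
+    - 'yes'
+new_env_keys:                     # only these env keys are returned to the caller
+- MLC_MYSCRIPT_*
+```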
 ### Input flags
-When we run a CM script we can also pass inputs to it and any input added in `input_mapping` dictionary inside `_cm.json` gets converted to the corresponding `ENV` variable.
+When we run an MLC script, we can also pass inputs to it, and any input added in the `input_mapping` dictionary inside `meta.yaml` gets converted to the corresponding `ENV` variable.

 ### Conditional execution of any `deps`, `post_deps`
 We can use `skip_if_env` dictionary inside any `deps`, `prehook_deps`, `posthook_deps` or `post_deps` to make its execution conditional
@@ -36,7 +47,7 @@ We can specify any specific version of a script using `version`. `version_max` a
 * When `version_max` is given, any version below this if present in the cache or detected in the system can be chosen. If nothing is detected `default_version` if present and if below `version_max` will be used for installation. Otherwise `version_max_usable` (additional needed input for `version_max`) will be used as `version`.

 ### Variations
-* Variations are used to customize CM script and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys also any other meta including dependencies specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script is having tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use tags `"get,myscript,_test"`.
+* Variations are used to customize an MLC script, and each unique combination of variations uses a unique cache entry. Each variation can turn on `env` keys as well as any other meta, including dependencies, specific to it. Variations are turned on like tags but with a `_` prefix. For example, if a script has tags `"get,myscript"`, to call the variation `"test"` inside it, we have to use tags `"get,myscript,_test"`.

 #### Variation groups
 `group` is a key to map variations into a group and at any time only one variation from a group can be used in the variation tags. For example, both `cpu` and `cuda` can be two variations under the `device` group, but user can at any time use either `cpu` or `cuda` as variation tags but not both.
@@ -44,7 +55,7 @@ We can specify any specific version of a script using `version`. `version_max` a
 #### Dynamic variations
 Sometimes it is difficult to add all variations needed for a script like say `batch_size` which can take many different values. To handle this case, we support dynamic variations using '#' where '#' can be dynamically replaced by any string. For example, `"_batch_size.8"` can be used as a tag to turn on the dynamic variation `"_batch_size.#"`.

-### ENV flow during CM script execution
+### ENV flow during MLC script execution
@@ -52,12 +63,12 @@ Sometimes it is difficult to add all variations needed for a script like say `ba

 * During a given script execution incoming `env` dictionary is saved `(saved_env)` and all the updates happens on a copy of it.

 * Same behaviour applies to `state` dictionary.

 #### Special env keys
-* Any env key with a prefix `CM_TMP_*` and `CM_GIT_*` are not passed by default to any dependency. These can be force passed by adding the key(s) to the `force_env_keys` list of the concerned dependency.
+* Any env key with the prefix `MLC_TMP_*` or `MLC_GIT_*` is not passed by default to any dependency. These can be force passed by adding the key(s) to the `force_env_keys` list of the concerned dependency.
 * Similarly we can avoid any env key from being passed to a given dependency by adding the prefix of the key in the `clean_env_keys` list of the concerned dependency.
-* `--input` is automatically converted to `CM_INPUT` env key
-* `version` is converted to `CM_VERSION`, ``version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`
-* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
-* If `env['CM_GIT_SSH']=yes`, then git URLs are changed to SSH from HTTPS.
+* `--input` is automatically converted to the `MLC_INPUT` env key
+* `version` is converted to `MLC_VERSION`, `version_min` to `MLC_VERSION_MIN` and `version_max` to `MLC_VERSION_MAX`
+* If `env['MLC_GH_TOKEN']=TOKEN_VALUE` is set then git URLs (specified by `MLC_GIT_URL`) are changed to add this token.
+* If `env['MLC_GIT_SSH']=yes`, then git URLs are changed to SSH from HTTPS.

 ### Script Meta
 #### Special keys in script meta
@@ -70,10 +81,10 @@ Sometimes it is difficult to add all variations needed for a script like say `ba

 * By default no depndencies are run for a cached entry unless `dynamic` key is set for it.

-Please see [here](getting-started.md) for trying CM scripts.
+Please see [here](getting-started.md) for trying MLC scripts.

-
-© 2022-24 [MLCommons](https://mlcommons.org)
+© 2022-25 [MLCommons](https://mlcommons.org)
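+
+As a worked example of the variation and version controls above (the script tags and version numbers are hypothetical), one could run:
+
+```bash
+# "_test" variation plus the dynamic "_batch_size.#" variation, with the
+# resolved version constrained between version_min and version_max
+mlcr get,myscript,_test,_batch_size.8 --version_min=1.2 --version_max=2.0 -j
+```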
diff --git a/docs/requirements.txt b/docs/requirements.txt index ee5149cfc..526baf529 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -3,4 +3,3 @@ swagger-markdown mkdocs-macros-plugin ruamel.yaml slugify -mkdocs-caseinsensitive-plugin diff --git a/docs/scripts/AI-ML-datasets/get-croissant/index.md b/docs/scripts/AI-ML-datasets/get-croissant/index.md deleted file mode 100644 index f707f1f85..000000000 --- a/docs/scripts/AI-ML-datasets/get-croissant/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-croissant -Automatically generated README for this automation recipe: **get-croissant** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlcommons croissant" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlcommons,croissant - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlcommons croissant " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlcommons,croissant' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlcommons croissant" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-croissant/run.bat) -___ -#### Script output -```bash -cmr "get mlcommons croissant " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md b/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md deleted file mode 100644 index f74ec73ef..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-cifar10/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# get-dataset-cifar10 -Automatically generated README for this automation recipe: **get-dataset-cifar10** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset cifar10 image-classification validation training" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,cifar10,image-classification,validation,training[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset cifar10 image-classification validation training [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,cifar10,image-classification,validation,training' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset cifar10 image-classification validation training[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_tiny` - - ENV variables: - - CM_DATASET_CONVERT_TO_TINYMLPERF: `yes` - -
- - - * Group "**data_format**" -
- Click here to expand this section. - - * **`_python`** (default) - - ENV variables: - - CM_DATASET: `CIFAR10` - - CM_DATASET_FILENAME: `cifar-10-python.tar.gz` - - CM_DATASET_FILENAME1: `cifar-10-python.tar` - - CM_DATASET_CIFAR10: `https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz` - -
- - - ##### Default variations - - `_python` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cifar10/run.bat) -___ -#### Script output -```bash -cmr "get dataset cifar10 image-classification validation training [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md b/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md deleted file mode 100644 index 22ae3381a..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-cnndm/index.md +++ /dev/null @@ -1,128 +0,0 @@ -# get-dataset-cnndm -Automatically generated README for this automation recipe: **get-dataset-cnndm** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset gpt-j cnndm cnn-dailymail original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,gpt-j,cnndm,cnn-dailymail,original[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,gpt-j,cnndm,cnn-dailymail,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset gpt-j cnndm cnn-dailymail original[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_intel` - -
- - - * Group "**dataset-type**" -
- Click here to expand this section. - - * `_calibration` - - ENV variables: - - CM_DATASET_CALIBRATION: `yes` - * **`_validation`** (default) - - ENV variables: - - CM_DATASET_CALIBRATION: `no` - -
- - - ##### Default variations - - `_validation` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_CALIBRATION: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run-intel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/run-intel.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-cnndm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset gpt-j cnndm cnn-dailymail original [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md b/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md deleted file mode 100644 index 98c9f978e..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-coco/index.md +++ /dev/null @@ -1,140 +0,0 @@ -# get-dataset-coco -Automatically generated README for this automation recipe: **get-dataset-coco** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset object-detection coco" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,object-detection,coco[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset object-detection coco [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,object-detection,coco' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset object-detection coco[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**size**" -
- Click here to expand this section. - - * **`_complete`** (default) - - ENV variables: - - CM_DATASET_COCO_SIZE: `complete` - * `_small` - - ENV variables: - - CM_DATASET_COCO_SIZE: `small` - -
- - - * Group "**type**" -
- Click here to expand this section. - - * `_train` - - ENV variables: - - CM_DATASET_COCO_TYPE: `train` - * **`_val`** (default) - - ENV variables: - - CM_DATASET_COCO_TYPE: `val` - -
- - - * Group "**version**" -
- Click here to expand this section. - - * **`_2017`** (default) - - ENV variables: - - CM_DATASET_COCO_VERSION: `2017` - -
- - - ##### Default variations - - `_2017,_complete,_val` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--from=value` → `CM_FROM=value` - * `--home=value` → `CM_HOME_DIR=value` - * `--store=value` → `CM_STORE=value` - * `--to=value` → `CM_TO=value` - - - - -___ -#### Script output -```bash -cmr "get dataset object-detection coco [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md b/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md deleted file mode 100644 index 23e09b06f..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-coco2014/index.md +++ /dev/null @@ -1,152 +0,0 @@ -# get-dataset-coco2014 -Automatically generated README for this automation recipe: **get-dataset-coco2014** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset coco2014 object-detection original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,coco2014,object-detection,original[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset coco2014 object-detection original [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,coco2014,object-detection,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset coco2014 object-detection original[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**annotations**" -
- Click here to expand this section. - - * `_custom-annotations` - - ENV variables: - - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: `yes` - * **`_default-annotations`** (default) - - ENV variables: - - CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: `no` - -
- - - * Group "**dataset-type**" -
- Click here to expand this section. - - * `_calibration` - - ENV variables: - - CM_DATASET_CALIBRATION: `yes` - * **`_validation`** (default) - - ENV variables: - - CM_DATASET_CALIBRATION: `no` - -
- - - * Group "**size**" -
- Click here to expand this section. - - * **`_50`** (default) - - ENV variables: - - CM_DATASET_SIZE: `50` - * `_500` - - ENV variables: - - CM_DATASET_SIZE: `500` - * `_full` - - ENV variables: - - CM_DATASET_SIZE: `` - * `_size.#` - - ENV variables: - - CM_DATASET_SIZE: `#` - -
- - - ##### Default variations - - `_50,_default-annotations,_validation` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_CALIBRATION: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-coco2014/run.bat) -___ -#### Script output -```bash -cmr "get dataset coco2014 object-detection original [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md b/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md deleted file mode 100644 index f28c6e10d..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-criteo/index.md +++ /dev/null @@ -1,124 +0,0 @@ -# get-dataset-criteo -Automatically generated README for this automation recipe: **get-dataset-criteo** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset criteo original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,criteo,original[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset criteo original [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,criteo,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset criteo original[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_backup` - - ENV variables: - - CM_BACKUP_ZIPS: `yes` - * `_fake` - - ENV variables: - - CM_CRITEO_FAKE: `yes` - -
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--criteo_path=value` → `CM_CRITEO_PATH=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BACKUP_ZIPS: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-criteo/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset criteo original [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md b/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md deleted file mode 100644 index 1abab6599..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-aux/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# get-dataset-imagenet-aux -Automatically generated README for this automation recipe: **get-dataset-imagenet-aux** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get aux dataset-aux image-classification imagenet-aux" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,aux,dataset-aux,image-classification,imagenet-aux[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get aux dataset-aux image-classification imagenet-aux [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,aux,dataset-aux,image-classification,imagenet-aux' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get aux dataset-aux image-classification imagenet-aux[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_2012` - - ENV variables: - - CM_DATASET_AUX_VER: `2012` - -
- - - * Group "**download-source**" -
- Click here to expand this section. - - * `_from.berkeleyvision` - - ENV variables: - - CM_WGET_URL: `http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz` - * **`_from.dropbox`** (default) - - ENV variables: - - CM_WGET_URL: `https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz` - -
- - - ##### Default variations - - `_from.dropbox` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-aux/run.bat) -___ -#### Script output -```bash -cmr "get aux dataset-aux image-classification imagenet-aux [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md b/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md deleted file mode 100644 index 7aae04d88..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-calibration/index.md +++ /dev/null @@ -1,104 +0,0 @@ -# get-dataset-imagenet-calibration -Automatically generated README for this automation recipe: **get-dataset-imagenet-calibration** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-calibration/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset imagenet calibration" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,imagenet,calibration[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset imagenet calibration [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,imagenet,calibration' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset imagenet calibration[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**calibration-option**" -
- Click here to expand this section. - - * **`_mlperf.option1`** (default) - - ENV variables: - - CM_MLPERF_IMAGENET_CALIBRATION_OPTION: `one` - - CM_DOWNLOAD_CHECKSUM: `f09719174af3553119e2c621157773a6` - * `_mlperf.option2` - - ENV variables: - - CM_MLPERF_IMAGENET_CALIBRATION_OPTION: `two` - - CM_DOWNLOAD_CHECKSUM: `e44582af00e3b4fc3fac30efd6bdd05f` - -
- - - ##### Default variations - - `_mlperf.option1` - -___ -#### Script output -```bash -cmr "get dataset imagenet calibration [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md b/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md deleted file mode 100644 index 48b39fa40..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-helper/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-dataset-imagenet-helper -Automatically generated README for this automation recipe: **get-dataset-imagenet-helper** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-helper/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get imagenet helper imagenet-helper" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,imagenet,helper,imagenet-helper - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get imagenet helper imagenet-helper " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,imagenet,helper,imagenet-helper' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get imagenet helper imagenet-helper" - ``` -___ - - -___ -#### Script output -```bash -cmr "get imagenet helper imagenet-helper " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md b/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md deleted file mode 100644 index 2b8bb952f..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-train/index.md +++ /dev/null @@ -1,96 +0,0 @@ -# get-dataset-imagenet-train -Automatically generated README for this automation recipe: **get-dataset-imagenet-train** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-train/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get imagenet train dataset original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,imagenet,train,dataset,original [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get imagenet train dataset original " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,imagenet,train,dataset,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get imagenet train dataset original" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `IMAGENET_TRAIN_PATH=value` - * `--torrent=value` → `CM_DATASET_IMAGENET_TRAIN_TORRENT_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-train/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get imagenet train dataset original " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md b/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md deleted file mode 100644 index d9cd7b787..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-imagenet-val/index.md +++ /dev/null @@ -1,149 +0,0 @@ -# get-dataset-imagenet-val -Automatically generated README for this automation recipe: **get-dataset-imagenet-val** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get val validation dataset imagenet ILSVRC image-classification original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,val,validation,dataset,imagenet,ILSVRC,image-classification,original[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,val,validation,dataset,imagenet,ILSVRC,image-classification,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get val validation dataset imagenet ILSVRC image-classification original[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_2012-500` - * `_2012-full` - * `_run-during-docker-build` - -
- - - * Group "**count**" -
- Click here to expand this section. - - * `_full` - - ENV variables: - - CM_DATASET_SIZE: `50000` - - CM_IMAGENET_FULL: `yes` - - CM_DAE_FILENAME: `ILSVRC2012_img_val.tar` - - CM_DAE_DOWNLOADED_CHECKSUM: `29b22e2961454d5413ddabcf34fc5622` - * `_size.#` - - ENV variables: - - CM_DATASET_SIZE: `#` - * **`_size.500`** (default) - - ENV variables: - - CM_DATASET_SIZE: `500` - - CM_DAE_FILENAME: `ILSVRC2012_img_val_500.tar` - - CM_DAE_URL: `http://cKnowledge.org/ai/data/ILSVRC2012_img_val_500.tar` - -
- - - * Group "**dataset-version**" -
- Click here to expand this section. - - * **`_2012`** (default) - - ENV variables: - - CM_DATASET_VER: `2012` - -
- - - ##### Default variations - - `_2012,_size.500` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--imagenet_path=value` → `IMAGENET_PATH=value` - * `--torrent=value` → `CM_DATASET_IMAGENET_VAL_TORRENT_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - No run file exists for Linux/macOS -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-imagenet-val/run.bat) -___ -#### Script output -```bash -cmr "get val validation dataset imagenet ILSVRC image-classification original [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md b/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md deleted file mode 100644 index 5010afffc..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-kits19/index.md +++ /dev/null @@ -1,138 +0,0 @@ -# get-dataset-kits19 -Automatically generated README for this automation recipe: **get-dataset-kits19** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-kits19/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset medical-imaging kits original kits19" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,medical-imaging,kits,original,kits19[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset medical-imaging kits original kits19 [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,medical-imaging,kits,original,kits19' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset medical-imaging kits original kits19[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_calibration` - - ENV variables: - - CM_DATASET_CALIBRATION: `yes` - * `_default` - - ENV variables: - - CM_GIT_PATCH: `no` - * `_full-history` - - ENV variables: - - CM_GIT_DEPTH: `` - * `_no-recurse-submodules` - - ENV variables: - - CM_GIT_RECURSE_SUBMODULES: `` - * `_patch` - - ENV variables: - - CM_GIT_PATCH: `yes` - * `_short-history` - - ENV variables: - - CM_GIT_DEPTH: `--depth 5` - * `_validation` - - ENV variables: - - CM_DATASET_VALIDATION: `yes` - -
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_CHECKOUT: `master` - * CM_GIT_DEPTH: `--depth 2` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: `` - * CM_GIT_URL: `https://github.com/neheller/kits19` - - -#### Versions -Default version: `master` - -* `custom` -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-kits19/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset medical-imaging kits original kits19 [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md b/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md deleted file mode 100644 index 05be625ad..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-librispeech/index.md +++ /dev/null @@ -1,97 +0,0 @@ -# get-dataset-librispeech -Automatically generated README for this automation recipe: **get-dataset-librispeech** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset speech speech-recognition librispeech validation audio training original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset speech speech-recognition librispeech validation audio training original " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,speech,speech-recognition,librispeech,validation,audio,training,original' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset speech speech-recognition librispeech validation audio training original" - ``` -___ - -#### Versions -Default version: `dev-clean` - -* `dev-clean` -* `dev-other` -* `test-clean` -* `test-other` -* `train-clean-100` -* `train-clean-360` -* `train-other-500` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-librispeech/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset speech speech-recognition librispeech validation audio training original " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md b/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md deleted file mode 100644 index 05578105c..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-openimages-annotations/index.md +++ /dev/null @@ -1,105 +0,0 @@ -# get-dataset-openimages-annotations -Automatically generated README for this automation recipe: **get-dataset-openimages-annotations** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-annotations/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get aux dataset-aux object-detection openimages annotations" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,aux,dataset-aux,object-detection,openimages,annotations[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get aux dataset-aux object-detection openimages annotations [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,aux,dataset-aux,object-detection,openimages,annotations' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get aux dataset-aux object-detection openimages annotations[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**download-source**" -
-    * **`_from.github`** (default)
-      - ENV variables:
-          - CM_WGET_URL: `https://github.com/mlcommons/inference/releases/download/v2.1/openimages-mlperf_annotations_2.1.json.zip`
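-
-    A minimal sketch of selecting the (default) download source explicitly:
-
-    ```bash
-    cm run script --tags=get,aux,dataset-aux,object-detection,openimages,annotations,_from.github
-    ```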
- - - ##### Default variations - - `_from.github` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-annotations/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get aux dataset-aux object-detection openimages annotations [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md b/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md deleted file mode 100644 index 6e634f401..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-openimages-calibration/index.md +++ /dev/null @@ -1,131 +0,0 @@ -# get-dataset-openimages-calibration -Automatically generated README for this automation recipe: **get-dataset-openimages-calibration** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-calibration/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset openimages calibration" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,openimages,calibration[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset openimages calibration [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,openimages,calibration' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset openimages calibration[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_filter`
-      - ENV variables:
-          - CM_CALIBRATE_FILTER: `yes`
-
-    * Group "**calibration-option**"
-      * **`_mlperf.option1`** (default)
-        - ENV variables:
-            - CM_MLPERF_OPENIMAGES_CALIBRATION_OPTION: `one`
-            - CM_DOWNLOAD_CHECKSUM1: `f09719174af3553119e2c621157773a6`
-
-    * Group "**filter-size**"
-      * `_filter-size.#`
-        - ENV variables:
-            - CM_CALIBRATION_FILTER_SIZE: `#`
-      * `_filter-size.400`
-        - ENV variables:
-            - CM_CALIBRATION_FILTER_SIZE: `400`
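-
-    For example, an illustrative run (not taken from the generated README) that filters the calibration set down to 400 images by combining `_filter` with a filter-size variation:
-
-    ```bash
-    cm run script --tags=get,dataset,openimages,calibration,_filter,_filter-size.400
-    ```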
- - - ##### Default variations - - `_mlperf.option1` - -#### Native script being run -=== "Linux/macOS" - * [run-filter.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages-calibration/run-filter.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset openimages calibration [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md b/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md deleted file mode 100644 index 5c9e2fa59..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-openimages/index.md +++ /dev/null @@ -1,164 +0,0 @@ -# get-dataset-openimages -Automatically generated README for this automation recipe: **get-dataset-openimages** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset openimages open-images object-detection original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,openimages,open-images,object-detection,original[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset openimages open-images object-detection original [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,openimages,open-images,object-detection,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset openimages open-images object-detection original[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_filter`
-    * `_filter-size.#`
-    * `_using-fiftyone`
-
-    * Group "**annotations**"
-      * `_custom-annotations`
-        - ENV variables:
-            - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: `yes`
-      * **`_default-annotations`** (default)
-        - ENV variables:
-            - CM_DATASET_OPENIMAGES_CUSTOM_ANNOTATIONS: `no`
-
-    * Group "**dataset-type**"
-      * `_calibration`
-        - ENV variables:
-            - CM_DATASET_CALIBRATION: `yes`
-      * **`_validation`** (default)
-        - ENV variables:
-            - CM_DATASET_CALIBRATION: `no`
-
-    * Group "**size**"
-      * **`_50`** (default)
-        - ENV variables:
-            - CM_DATASET_SIZE: `50`
-      * `_500`
-        - ENV variables:
-            - CM_DATASET_SIZE: `500`
-      * `_full`
-        - ENV variables:
-            - CM_DATASET_SIZE: ``
-      * `_size.#`
-        - ENV variables:
-            - CM_DATASET_SIZE: `#`
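-
-    As a sketch (picking one variation per group; the combination itself is illustrative), downloading the 500-image validation split with the default annotations:
-
-    ```bash
-    cm run script --tags=get,dataset,openimages,open-images,object-detection,original,_500,_validation,_default-annotations
-    ```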
- - - ##### Default variations - - `_50,_default-annotations,_validation` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_CALIBRATION: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openimages/run.bat) -___ -#### Script output -```bash -cmr "get dataset openimages open-images object-detection original [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md b/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md deleted file mode 100644 index a437ae42c..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-openorca/index.md +++ /dev/null @@ -1,132 +0,0 @@ -# get-dataset-openorca -Automatically generated README for this automation recipe: **get-dataset-openorca** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-openorca/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset openorca language-processing original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,openorca,language-processing,original[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset openorca language-processing original [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,openorca,language-processing,original' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset openorca language-processing original[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**dataset-type**" -
-    * `_calibration`
-      - ENV variables:
-          - CM_DATASET_CALIBRATION: `yes`
-    * **`_validation`** (default)
-      - ENV variables:
-          - CM_DATASET_CALIBRATION: `no`
-
-    * Group "**size**"
-      * `_500`
-        - ENV variables:
-            - CM_DATASET_SIZE: `500`
-      * **`_60`** (default)
-        - ENV variables:
-            - CM_DATASET_SIZE: `60`
-      * `_full`
-        - ENV variables:
-            - CM_DATASET_SIZE: `24576`
-      * `_size.#`
-        - ENV variables:
-            - CM_DATASET_SIZE: `#`
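-
-    For instance, an illustrative invocation fetching the full 24576-sample validation set:
-
-    ```bash
-    cm run script --tags=get,dataset,openorca,language-processing,original,_full,_validation
-    ```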
- - - ##### Default variations - - `_60,_validation` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_CALIBRATION: `no` - - - -___ -#### Script output -```bash -cmr "get dataset openorca language-processing original [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md b/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md deleted file mode 100644 index 30e0fbeee..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-squad-vocab/index.md +++ /dev/null @@ -1,105 +0,0 @@ -# get-dataset-squad-vocab -Automatically generated README for this automation recipe: **get-dataset-squad-vocab** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad-vocab/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,aux,dataset-aux,language-processing,squad-aux,vocab,squad-vocab' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get aux dataset-aux language-processing squad-aux vocab squad-vocab[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**download-source**" -
-    * **`_from.zenodo`** (default)
-      - ENV variables:
-          - CM_WGET_URL: `https://zenodo.org/record/3733868/files/vocab.txt`
- - - ##### Default variations - - `_from.zenodo` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad-vocab/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get aux dataset-aux language-processing squad-aux vocab squad-vocab [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md b/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md deleted file mode 100644 index 554e79a57..000000000 --- a/docs/scripts/AI-ML-datasets/get-dataset-squad/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# get-dataset-squad -Automatically generated README for this automation recipe: **get-dataset-squad** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset squad language-processing validation original" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,squad,language-processing,validation,original - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset squad language-processing validation original " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,squad,language-processing,validation,original' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset squad language-processing validation original" - ``` -___ - -#### Versions -Default version: `1.1` - -* `1.1` -* `2.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dataset-squad/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset squad language-processing validation original " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md deleted file mode 100644 index c75f70bbf..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-criteo/index.md +++ /dev/null @@ -1,137 +0,0 @@ -# get-preprocessed-dataset-criteo -Automatically generated README for this automation recipe: **get-preprocessed-dataset-criteo** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset criteo recommendation dlrm preprocessed" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,criteo,recommendation,dlrm,preprocessed' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset criteo recommendation dlrm preprocessed[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_1`
-      - ENV variables:
-          - CM_DATASET_SIZE: `1`
-    * `_50`
-      - ENV variables:
-          - CM_DATASET_SIZE: `50`
-    * `_fake`
-      - ENV variables:
-          - CM_CRITEO_FAKE: `yes`
-    * `_full`
-    * `_validation`
-
-    * Group "**type**"
-      * **`_multihot`** (default)
-        - ENV variables:
-            - CM_DATASET_CRITEO_MULTIHOT: `yes`
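-
-    A hypothetical invocation (the thread count is chosen purely for illustration) that preprocesses a fake Criteo sample in the default multihot format:
-
-    ```bash
-    cm run script --tags=get,dataset,criteo,recommendation,dlrm,preprocessed,_fake,_multihot --threads=8
-    ```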
- - - ##### Default variations - - `_multihot` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` - * `--output_dir=value` → `CM_DATASET_PREPROCESSED_OUTPUT_PATH=value` - * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run-multihot.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/run-multihot.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-criteo/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset criteo recommendation dlrm preprocessed [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md deleted file mode 100644 index 844e2c2e8..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-generic/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-preprocesser-script-generic -Automatically generated README for this automation recipe: **get-preprocesser-script-generic** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocesser-script-generic/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get preprocessor generic image-preprocessor script" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,preprocessor,generic,image-preprocessor,script - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get preprocessor generic image-preprocessor script " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,preprocessor,generic,image-preprocessor,script' - 'out':'con', - ... - (other input keys for this script) - ... 
-    })
-
-    if r['return']>0:
-        print (r['error'])
-
-    ```
-
-
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get preprocessor generic image-preprocessor script"
-    ```
-___
-
-
-___
-#### Script output
-```bash
-cmr "get preprocessor generic image-preprocessor script " -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md
deleted file mode 100644
index c4bee08bc..000000000
--- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-imagenet/index.md
+++ /dev/null
@@ -1,301 +0,0 @@
-# get-preprocessed-dataset-imagenet
-Automatically generated README for this automation recipe: **get-preprocessed-dataset-imagenet**
-
-Category: **[AI/ML datasets](..)**
-
-License: **Apache 2.0**
-
-* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/README-extra.md)
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-````cmr "get dataset imagenet ILSVRC image-classification preprocessed" --help````
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed[,variations] [--input_flags]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags]
-    ```
-
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,dataset,imagenet,ILSVRC,image-classification,preprocessed',
-                      'out': 'con',
-                      ...
-                      (other input keys for this script)
-                      ...
-                     })
-
-    if r['return']>0:
-        print (r['error'])
-    ```
-
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get dataset imagenet ILSVRC image-classification preprocessed[variations]" [--input_flags]
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-    * `_default`
-    * `_pytorch`
-      - ENV variables:
-          - CM_PREPROCESS_PYTORCH: `yes`
-          - CM_MODEL: `resnet50`
-    * `_tflite_tpu`
-      - ENV variables:
-          - CM_MODEL: `resnet50`
-          - CM_PREPROCESS_TFLITE_TPU: `yes`
-
-    * Group "**calibration-option**"
-      * `_mlperf.option1`
-        - ENV variables:
-            - CM_DATASET_CALIBRATION_OPTION: `one`
-      * `_mlperf.option2`
-        - ENV variables:
-            - CM_DATASET_CALIBRATION_OPTION: `two`
-
-    * Group "**dataset-type**"
-      * `_calibration`
-        - ENV variables:
-            - CM_DATASET_TYPE: `calibration`
-      * **`_validation`** (default)
-        - ENV variables:
-            - CM_DATASET_TYPE: `validation`
-
-    * Group "**extension**"
-      * `_rgb32`
-        - ENV variables:
-            - CM_DATASET_PREPROCESSED_EXTENSION: `rgb32`
-      * `_rgb8`
-        - ENV variables:
-            - CM_DATASET_PREPROCESSED_EXTENSION: `rgb8`
-
-    * Group "**interpolation-method**"
-      * `_inter.area`
-        - ENV variables:
-            - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA`
-      * `_inter.linear`
-        - ENV variables:
-            - CM_DATASET_INTERPOLATION_METHOD: `INTER_LINEAR`
-
-    * Group "**layout**"
-      * **`_NCHW`** (default)
-        - ENV variables:
-            - CM_DATASET_DATA_LAYOUT: `NCHW`
-      * `_NHWC`
-        - ENV variables:
-            - CM_DATASET_DATA_LAYOUT: `NHWC`
-
-    * Group "**model**"
-      * `_for.mobilenet`
-      * `_for.resnet50`
-        - ENV variables:
-            - CM_DATASET_SUBTRACT_MEANS: `1`
-            - CM_DATASET_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
-            - CM_DATASET_NORMALIZE_DATA: `0`
-            - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA`
-
-    * Group "**precision**"
-      * `_float32`
-        - ENV variables:
-            - CM_DATASET_DATA_TYPE: `float32`
-            - CM_DATASET_QUANTIZE: `0`
-            - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
-      * `_int8`
-        - ENV variables:
-            - CM_DATASET_DATA_TYPE: `int8`
-            - CM_DATASET_QUANTIZE: `1`
-            - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
-      * `_uint8`
-        - ENV variables:
-            - CM_DATASET_DATA_TYPE: `uint8`
-            - CM_DATASET_DATA_TYPE_INPUT: `float32`
-            - CM_DATASET_QUANTIZE: `1`
-            - CM_DATASET_CONVERT_TO_UNSIGNED: `1`
-
-    * Group "**preprocessing-source**"
-      * `_generic-preprocessor`
-        - ENV variables:
-            - CM_DATASET_REFERENCE_PREPROCESSOR: `0`
-      * **`_mlcommons-reference-preprocessor`** (default)
-        - ENV variables:
-            - CM_DATASET_REFERENCE_PREPROCESSOR: `1`
-
-    * Group "**resolution**"
-      * `_resolution.#`
-        - ENV variables:
-            - CM_DATASET_INPUT_SQUARE_SIDE: `#`
-      * **`_resolution.224`** (default)
-        - ENV variables:
-            - CM_DATASET_INPUT_SQUARE_SIDE: `224`
-
-    * Group "**size**"
-      * `_1`
-        - ENV variables:
-            - CM_DATASET_SIZE: `1`
-      * `_500`
-        - ENV variables:
-            - CM_DATASET_SIZE: `500`
-      * `_full`
-        - ENV variables:
-            - CM_DATASET_SIZE: `50000`
-      * `_size.#`
-        - ENV variables:
-            - CM_DATASET_SIZE: `#`
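-
-    An illustrative combination (one variation per group, not prescribed by the generated README) that preprocesses 500 validation images as quantized `uint8` in NHWC layout:
-
-    ```bash
-    cm run script --tags=get,dataset,imagenet,ILSVRC,image-classification,preprocessed,_uint8,_NHWC,_500,_validation
-    ```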
- - - ##### Default variations - - `_NCHW,_mlcommons-reference-preprocessor,_resolution.224,_validation` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` - * `--imagenet_path=value` → `CM_IMAGENET_PATH=value` - * `--imagenet_preprocessed_path=value` → `CM_IMAGENET_PREPROCESSED_PATH=value` - * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_CROP_FACTOR: `87.5` - * CM_DATASET_DATA_TYPE: `float32` - * CM_DATASET_DATA_LAYOUT: `NCHW` - * CM_DATASET_QUANT_SCALE: `1` - * CM_DATASET_QUANTIZE: `0` - * CM_DATASET_QUANT_OFFSET: `0` - * CM_DATASET_PREPROCESSED_EXTENSION: `npy` - * CM_DATASET_CONVERT_TO_UNSIGNED: `0` - * CM_DATASET_REFERENCE_PREPROCESSOR: `1` - * CM_PREPROCESS_VGG: `yes` - * CM_MODEL: `resnet50` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-imagenet/run.bat) -___ -#### Script output -```bash -cmr "get dataset imagenet ILSVRC image-classification preprocessed [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md deleted file mode 100644 index d2a985eca..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-kits19/index.md +++ /dev/null @@ -1,175 +0,0 @@ -# get-preprocessed-dataset-kits19 -Automatically generated README for this automation recipe: **get-preprocessed-dataset-kits19** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-kits19/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset medical-imaging kits19 preprocessed" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,medical-imaging,kits19,preprocessed' - 'out':'con', - ... - (other input keys for this script) - ... 
-    })
-
-    if r['return']>0:
-        print (r['error'])
-    ```
-
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get dataset medical-imaging kits19 preprocessed[variations]" [--input_flags]
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-    * `_nvidia`
-      - ENV variables:
-          - CM_PREPROCESSING_BY_NVIDIA: `yes`
-
-    * Group "**dataset-count**"
-      * `_1`
-        - ENV variables:
-            - CM_DATASET_SIZE: `1`
-      * `_5`
-        - ENV variables:
-            - CM_DATASET_SIZE: `5`
-      * `_50`
-        - ENV variables:
-            - CM_DATASET_SIZE: `50`
-      * `_500`
-        - ENV variables:
-            - CM_DATASET_SIZE: `500`
-      * `_full`
-        - ENV variables:
-            - CM_DATASET_SIZE: ``
-
-    * Group "**dataset-precision**"
-      * **`_fp32`** (default)
-        - ENV variables:
-            - CM_DATASET_DTYPE: `fp32`
-      * `_int8`
-        - ENV variables:
-            - CM_DATASET_DTYPE: `int8`
-
-    * Group "**dataset-type**"
-      * `_calibration`
-        - ENV variables:
-            - CM_DATASET_PATH: `<<>>`
-      * **`_validation`** (default)
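-
-    For example, a sketch (combination chosen for illustration) preprocessing five validation cases at int8 precision:
-
-    ```bash
-    cm run script --tags=get,dataset,medical-imaging,kits19,preprocessed,_5,_int8,_validation
-    ```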
- - - ##### Default variations - - `_fp32,_validation` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` - * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET: `kits19` - * CM_DATASET_DTYPE: `fp32` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-kits19/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset medical-imaging kits19 preprocessed [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md deleted file mode 100644 index f683a8f52..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-librispeech/index.md +++ /dev/null @@ -1,164 +0,0 @@ -# get-preprocessed-dataset-librispeech -Automatically generated README for this automation recipe: **get-preprocessed-dataset-librispeech** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-librispeech/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset speech-recognition librispeech preprocessed" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,speech-recognition,librispeech,preprocessed' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset speech-recognition librispeech preprocessed[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**dataset-count**" -
-    * `_1`
-      - ENV variables:
-          - CM_DATASET_SIZE: `1`
-    * `_5`
-      - ENV variables:
-          - CM_DATASET_SIZE: `5`
-    * `_50`
-      - ENV variables:
-          - CM_DATASET_SIZE: `50`
-    * `_500`
-      - ENV variables:
-          - CM_DATASET_SIZE: `500`
-    * `_full`
-      - ENV variables:
-          - CM_DATASET_SIZE: ``
-
-    * Group "**dataset-precision**"
-      * **`_fp32`** (default)
-        - ENV variables:
-            - CM_DATASET_DTYPE: `fp32`
-      * `_int8`
-        - ENV variables:
-            - CM_DATASET_DTYPE: `int8`
-
-    * Group "**dataset-type**"
-      * `_calibration`
-        - ENV variables:
-            - CM_DATASET_PATH: `<<>>`
-      * **`_validation`** (default)
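-
-    A minimal sketch (variation names taken from the lists above) preprocessing 50 validation samples at the default fp32 precision:
-
-    ```bash
-    cm run script --tags=get,dataset,speech-recognition,librispeech,preprocessed,_50,_fp32,_validation
-    ```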
- - - ##### Default variations - - `_fp32,_validation` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` - * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET: `kits19` - * CM_DATASET_DTYPE: `fp32` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-librispeech/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset speech-recognition librispeech preprocessed [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md deleted file mode 100644 index 9bbe30eec..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openimages/index.md +++ /dev/null @@ -1,287 +0,0 @@ -# get-preprocessed-dataset-openimages -Automatically generated README for this automation recipe: **get-preprocessed-dataset-openimages** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset openimages open-images object-detection preprocessed" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,openimages,open-images,object-detection,preprocessed[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,openimages,open-images,object-detection,preprocessed' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset openimages open-images object-detection preprocessed[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_filter`
-    * `_for.retinanet.onnx`
-      - ENV variables:
-          - CM_ML_MODEL_NAME: `retinanet`
-          - CM_DATASET_SUBTRACT_MEANS: `1`
-          - CM_DATASET_GIVEN_CHANNEL_MEANS: `0.485 0.456 0.406`
-          - CM_DATASET_GIVEN_CHANNEL_STDS: `0.229 0.224 0.225`
-          - CM_DATASET_NORMALIZE_DATA: `0`
-          - CM_DATASET_NORMALIZE_LOWER: `0.0`
-          - CM_DATASET_NORMALIZE_UPPER: `1.0`
-          - CM_DATASET_CONVERT_TO_BGR: `0`
-          - CM_DATASET_CROP_FACTOR: `100.0`
-    * `_nvidia`
-      - ENV variables:
-          - CM_PREPROCESSING_BY_NVIDIA: `yes`
-    * `_quant-offset.#`
-    * `_quant-scale.#`
-
-    * Group "**annotations**"
-      * `_custom-annotations`
-      * **`_default-annotations`** (default)
-
-    * Group "**dataset-count**"
-      * **`_50`** (default)
-        - ENV variables:
-            - CM_DATASET_SIZE: `50`
-      * `_500`
-        - ENV variables:
-            - CM_DATASET_SIZE: `500`
-      * `_full`
-      * `_size.#`
-        - ENV variables:
-            - CM_DATASET_SIZE: `#`
-
-    * Group "**dataset-layout**"
-      * **`_NCHW`** (default)
-        - ENV variables:
-            - CM_DATASET_DATA_LAYOUT: `NCHW`
-      * `_NHWC`
-        - ENV variables:
-            - CM_DATASET_DATA_LAYOUT: `NHWC`
-
-    * Group "**dataset-precision**"
-      * **`_fp32`** (default)
-        - ENV variables:
-            - CM_DATASET_DTYPE: `fp32`
-            - CM_DATASET_INPUT_DTYPE: `fp32`
-            - CM_DATASET_QUANTIZE: `0`
-            - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
-      * `_int8`
-        - ENV variables:
-            - CM_DATASET_DTYPE: `int8`
-            - CM_DATASET_INPUT_DTYPE: `fp32`
-            - CM_DATASET_QUANTIZE: `1`
-            - CM_DATASET_CONVERT_TO_UNSIGNED: `0`
-      * `_uint8`
-        - ENV variables:
-            - CM_DATASET_DTYPE: `uint8`
-            - CM_DATASET_INPUT_DTYPE: `fp32`
-            - CM_DATASET_QUANTIZE: `1`
-            - CM_DATASET_CONVERT_TO_UNSIGNED: `1`
-
-    * Group "**dataset-type**"
-      * `_calibration`
-        - ENV variables:
-            - CM_DATASET_PATH: `<<>>`
-            - CM_DATASET_ANNOTATIONS_FILE_PATH: `<<>>`
-            - CM_DATASET_TYPE: `calibration`
-      * **`_validation`** (default)
-        - ENV variables:
-            - CM_DATASET_TYPE: `validation`
-
-    * Group "**extension**"
-      * `_npy`
-        - ENV variables:
-            - CM_DATASET_PREPROCESSED_EXTENSION: `npy`
-      * `_raw`
-        - ENV variables:
-            - CM_DATASET_PREPROCESSED_EXTENSION: `raw`
-      * `_rgb32`
-        - ENV variables:
-            - CM_DATASET_PREPROCESSED_EXTENSION: `rgb32`
-      * `_rgb8`
-        - ENV variables:
-            - CM_DATASET_PREPROCESSED_EXTENSION: `rgb8`
-
-    * Group "**filter-size**"
-      * `_filter-size.#`
-
-    * Group "**interpolation-method**"
-      * `_inter.area`
-        - ENV variables:
-            - CM_DATASET_INTERPOLATION_METHOD: `INTER_AREA`
-      * `_inter.linear`
-        - ENV variables:
-            - CM_DATASET_INTERPOLATION_METHOD: `INTER_LINEAR`
-
-    * Group "**preprocessing-source**"
-      * `_generic-preprocessor`
-        - ENV variables:
-            - CM_DATASET_REFERENCE_PREPROCESSOR: `0`
-      * **`_mlcommons-reference-preprocessor`** (default)
-        - ENV variables:
-            - CM_DATASET_REFERENCE_PREPROCESSOR: `1`
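-
-    An illustrative run (the combination is a sketch, not a documented recipe) preprocessing the 500-image validation set with the retinanet ONNX preprocessing parameters:
-
-    ```bash
-    cm run script --tags=get,dataset,openimages,open-images,object-detection,preprocessed,_for.retinanet.onnx,_500,_fp32,_NCHW
-    ```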
- - - ##### Default variations - - `_50,_NCHW,_default-annotations,_fp32,_mlcommons-reference-preprocessor,_validation` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--dir=value` → `CM_DATASET_PREPROCESSED_PATH=value` - * `--threads=value` → `CM_NUM_PREPROCESS_THREADS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET: `OPENIMAGES` - * CM_DATASET_DTYPE: `fp32` - * CM_DATASET_INPUT_SQUARE_SIDE: `800` - * CM_DATASET_CROP_FACTOR: `100.0` - * CM_DATASET_QUANT_SCALE: `1` - * CM_DATASET_QUANTIZE: `0` - * CM_DATASET_QUANT_OFFSET: `0` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openimages/run.bat) -___ -#### Script output -```bash -cmr "get dataset openimages open-images object-detection preprocessed [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md deleted file mode 100644 index 5232eaf72..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-openorca/index.md +++ /dev/null @@ -1,129 +0,0 @@ -# get-preprocessed-dataset-openorca -Automatically generated README for this automation recipe: **get-preprocessed-dataset-openorca** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openorca/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset openorca language-processing preprocessed" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,openorca,language-processing,preprocessed[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset openorca language-processing preprocessed [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,openorca,language-processing,preprocessed' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset openorca language-processing preprocessed[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**dataset-type**" -
-    * `_calibration`
-      - ENV variables:
-          - CM_DATASET_CALIBRATION: `yes`
-    * **`_validation`** (default)
-      - ENV variables:
-          - CM_DATASET_CALIBRATION: `no`
-
-    * Group "**size**"
-      * **`_60`** (default)
-      * `_full`
-      * `_size.#`
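-
-    For example, making the defaults explicit:
-
-    ```bash
-    cm run script --tags=get,dataset,openorca,language-processing,preprocessed,_60,_validation
-    ```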
- - - ##### Default variations - - `_60,_validation` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_CALIBRATION: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-openorca/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset openorca language-processing preprocessed [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md b/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md deleted file mode 100644 index 422bbd911..000000000 --- a/docs/scripts/AI-ML-datasets/get-preprocessed-dataset-squad/index.md +++ /dev/null @@ -1,165 +0,0 @@ -# get-preprocessed-dataset-squad -Automatically generated README for this automation recipe: **get-preprocessed-dataset-squad** - -Category: **[AI/ML datasets](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get dataset preprocessed tokenized squad" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,dataset,preprocessed,tokenized,squad[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get dataset preprocessed tokenized squad [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,dataset,preprocessed,tokenized,squad' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get dataset preprocessed tokenized squad[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**calibration-set**" -
-    * `_calib1`
-      - ENV variables:
-          - CM_DATASET_SQUAD_CALIBRATION_SET: `one`
-    * `_calib2`
-      - ENV variables:
-          - CM_DATASET_SQUAD_CALIBRATION_SET: `two`
-    * **`_no-calib`** (default)
-      - ENV variables:
-          - CM_DATASET_SQUAD_CALIBRATION_SET: ``
-
-    * Group "**doc-stride**"
-      * `_doc-stride.#`
-        - ENV variables:
-            - CM_DATASET_DOC_STRIDE: `#`
-      * **`_doc-stride.128`** (default)
-        - ENV variables:
-            - CM_DATASET_DOC_STRIDE: `128`
-
-    * Group "**packing**"
-      * `_packed`
-        - ENV variables:
-            - CM_DATASET_SQUAD_PACKED: `yes`
-
-    * Group "**raw**"
-      * `_pickle`
-        - ENV variables:
-            - CM_DATASET_RAW: `no`
-      * **`_raw`** (default)
-        - ENV variables:
-            - CM_DATASET_RAW: `yes`
-
-    * Group "**seq-length**"
-      * `_seq-length.#`
-        - ENV variables:
-            - CM_DATASET_MAX_SEQ_LENGTH: `#`
-      * **`_seq-length.384`** (default)
-        - ENV variables:
-            - CM_DATASET_MAX_SEQ_LENGTH: `384`
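-
-    A hypothetical invocation (whether `_packed` composes with every other group is not documented here) producing a packed, tokenized SQuAD set at the default sequence length; the `_seq-length.#` wildcard accepts other lengths:
-
-    ```bash
-    cm run script --tags=get,dataset,preprocessed,tokenized,squad,_packed,_seq-length.384
-    ```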
- - - ##### Default variations - - `_doc-stride.128,_no-calib,_raw,_seq-length.384` - -#### Native script being run -=== "Linux/macOS" - * [run-packed.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/run-packed.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-preprocessed-dataset-squad/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get dataset preprocessed tokenized squad [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-datasets/index.md b/docs/scripts/AI-ML-datasets/index.md deleted file mode 100644 index 8e94f6073..000000000 --- a/docs/scripts/AI-ML-datasets/index.md +++ /dev/null @@ -1,27 +0,0 @@ -* [get-croissant](get-croissant/index.md) -* [get-dataset-cifar10](get-dataset-cifar10/index.md) -* [get-dataset-cnndm](get-dataset-cnndm/index.md) -* [get-dataset-coco](get-dataset-coco/index.md) -* [get-dataset-coco2014](get-dataset-coco2014/index.md) -* [get-dataset-criteo](get-dataset-criteo/index.md) -* [get-dataset-imagenet-aux](get-dataset-imagenet-aux/index.md) -* [get-dataset-imagenet-calibration](get-dataset-imagenet-calibration/index.md) -* [get-dataset-imagenet-helper](get-dataset-imagenet-helper/index.md) -* [get-dataset-imagenet-train](get-dataset-imagenet-train/index.md) -* [get-dataset-imagenet-val](get-dataset-imagenet-val/index.md) -* [get-dataset-kits19](get-dataset-kits19/index.md) -* [get-dataset-librispeech](get-dataset-librispeech/index.md) -* [get-dataset-openimages](get-dataset-openimages/index.md) -* [get-dataset-openimages-annotations](get-dataset-openimages-annotations/index.md) -* [get-dataset-openimages-calibration](get-dataset-openimages-calibration/index.md) -* [get-dataset-openorca](get-dataset-openorca/index.md) -* [get-dataset-squad](get-dataset-squad/index.md) -* [get-dataset-squad-vocab](get-dataset-squad-vocab/index.md) -* [get-preprocessed-dataset-criteo](get-preprocessed-dataset-criteo/index.md) -* [get-preprocessed-dataset-imagenet](get-preprocessed-dataset-imagenet/index.md) -* [get-preprocessed-dataset-kits19](get-preprocessed-dataset-kits19/index.md) -* [get-preprocessed-dataset-librispeech](get-preprocessed-dataset-librispeech/index.md) -* [get-preprocessed-dataset-openimages](get-preprocessed-dataset-openimages/index.md) -* [get-preprocessed-dataset-openorca](get-preprocessed-dataset-openorca/index.md) -* [get-preprocessed-dataset-squad](get-preprocessed-dataset-squad/index.md) -* [get-preprocessed-dataset-generic](get-preprocessed-dataset-generic/index.md) diff --git a/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md b/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md deleted file mode 100644 index d0a9d4436..000000000 --- a/docs/scripts/AI-ML-frameworks/get-google-saxml/index.md +++ /dev/null @@ -1,89 +0,0 @@ -# get-google-saxml -Automatically generated README for this automation recipe: **get-google-saxml** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/_cm.yaml)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get google saxml" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,google,saxml - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get google saxml " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,google,saxml' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get google saxml" - ``` -___ - -#### Versions -Default version: `master` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-saxml/run.bat) -___ -#### Script output -```bash -cmr "get google saxml " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md b/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md deleted file mode 100644 index 04e0b0380..000000000 --- a/docs/scripts/AI-ML-frameworks/get-onnxruntime-prebuilt/index.md +++ /dev/null @@ -1,111 +0,0 @@ -# get-onnxruntime-prebuilt -Automatically generated README for this automation recipe: **get-onnxruntime-prebuilt** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install onnxruntime get prebuilt lib lang-c lang-cpp" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install onnxruntime get prebuilt lib lang-c lang-cpp[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**device**" -
-
-    * **`_cpu`** (default)
-      - ENV variables:
-        - CM_ONNXRUNTIME_DEVICE: ``
-    * `_cuda`
-      - ENV variables:
-        - CM_ONNXRUNTIME_DEVICE: `gpu`
-
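-    As a usage sketch (variations are appended to the documented tag list, as in the CLI section above), selecting the CUDA build could look like:
-
-    ```bash
-    cm run script --tags=install,onnxruntime,get,prebuilt,lib,lang-c,lang-cpp,_cuda
-    ```
-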
- - - ##### Default variations - - `_cpu` -#### Versions -Default version: `1.16.3` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-onnxruntime-prebuilt/run.bat) -___ -#### Script output -```bash -cmr "install onnxruntime get prebuilt lib lang-c lang-cpp [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md b/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md deleted file mode 100644 index 613a95510..000000000 --- a/docs/scripts/AI-ML-frameworks/get-qaic-apps-sdk/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-qaic-apps-sdk -Automatically generated README for this automation recipe: **get-qaic-apps-sdk** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-apps-sdk/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,detect,qaic,apps,sdk,apps-sdk,qaic-apps-sdk' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get detect qaic apps sdk apps-sdk qaic-apps-sdk" - ``` -___ - - -___ -#### Script output -```bash -cmr "get detect qaic apps sdk apps-sdk qaic-apps-sdk " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md b/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md deleted file mode 100644 index 7a5599715..000000000 --- a/docs/scripts/AI-ML-frameworks/get-qaic-platform-sdk/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-qaic-platform-sdk -Automatically generated README for this automation recipe: **get-qaic-platform-sdk** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-platform-sdk/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,detect,qaic,platform,sdk,platform-sdk,qaic-platform-sdk' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get detect qaic platform sdk platform-sdk qaic-platform-sdk" - ``` -___ - - -___ -#### Script output -```bash -cmr "get detect qaic platform sdk platform-sdk qaic-platform-sdk " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md b/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md deleted file mode 100644 index 159dc0edd..000000000 --- a/docs/scripts/AI-ML-frameworks/get-qaic-software-kit/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# get-qaic-software-kit -Automatically generated README for this automation recipe: **get-qaic-software-kit** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-software-kit/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get qaic software kit qaic-software-kit" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,qaic,software,kit,qaic-software-kit[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get qaic software kit qaic-software-kit [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,qaic,software,kit,qaic-software-kit' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get qaic software kit qaic-software-kit[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_branch.#`
-      - ENV variables:
-        - CM_GIT_CHECKOUT: `#`
-
-    * Group "**repo-source**"
-
-    * `_repo.#`
-      - ENV variables:
-        - CM_GIT_URL: `#`
-    * **`_repo.quic`** (default)
-      - ENV variables:
-        - CM_GIT_URL: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100`
-
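-    Here `#` is a placeholder filled in by the caller. Assuming a branch named `main` exists in the selected repository (an illustrative value, not one fixed by this script), checking it out could look like:
-
-    ```bash
-    cm run script --tags=get,qaic,software,kit,qaic-software-kit,_branch.main
-    ```
-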
- - - ##### Default variations - - `_repo.quic` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-qaic-software-kit/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get qaic software kit qaic-software-kit [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/get-rocm/index.md b/docs/scripts/AI-ML-frameworks/get-rocm/index.md deleted file mode 100644 index c31689254..000000000 --- a/docs/scripts/AI-ML-frameworks/get-rocm/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-rocm -Automatically generated README for this automation recipe: **get-rocm** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rocm/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get rocm get-rocm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,rocm,get-rocm - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get rocm get-rocm " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,rocm,get-rocm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get rocm get-rocm" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rocm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get rocm get-rocm " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/get-tvm/index.md b/docs/scripts/AI-ML-frameworks/get-tvm/index.md deleted file mode 100644 index da9315326..000000000 --- a/docs/scripts/AI-ML-frameworks/get-tvm/index.md +++ /dev/null @@ -1,141 +0,0 @@ -# get-tvm -Automatically generated README for this automation recipe: **get-tvm** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get tvm get-tvm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,tvm,get-tvm[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get tvm get-tvm [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,tvm,get-tvm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get tvm get-tvm[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_cuda`
-      - ENV variables:
-        - CM_TVM_USE_CUDA: `yes`
-    * `_openmp`
-      - ENV variables:
-        - CM_TVM_USE_OPENMP: `yes`
-
-    * Group "**installation-type**"
-
-    * **`_llvm`** (default)
-      - ENV variables:
-        - CM_TVM_USE_LLVM: `yes`
-    * `_pip-install`
-      - ENV variables:
-        - CM_TVM_PIP_INSTALL: `yes`
-
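-    As a sketch of combining a free variation with a group variation (whether a given combination builds depends on the script itself), a pip-based install with CUDA enabled could look like:
-
-    ```bash
-    cm run script --tags=get,tvm,get-tvm,_pip-install,_cuda
-    ```
-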
- - - ##### Default variations - - `_llvm` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_CHECKOUT: `main` - * CM_GIT_URL: `https://github.com/apache/tvm` - * CM_TVM_PIP_INSTALL: `no` - - -#### Versions -* `main` -* `v0.10.0` -* `v0.7.0` -* `v0.8.0` -* `v0.9.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get tvm get-tvm [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/index.md b/docs/scripts/AI-ML-frameworks/index.md deleted file mode 100644 index dd8814fe1..000000000 --- a/docs/scripts/AI-ML-frameworks/index.md +++ /dev/null @@ -1,12 +0,0 @@ -* [get-google-saxml](get-google-saxml/index.md) -* [get-onnxruntime-prebuilt](get-onnxruntime-prebuilt/index.md) -* [get-qaic-apps-sdk](get-qaic-apps-sdk/index.md) -* [get-qaic-platform-sdk](get-qaic-platform-sdk/index.md) -* [get-qaic-software-kit](get-qaic-software-kit/index.md) -* [get-rocm](get-rocm/index.md) -* [get-tvm](get-tvm/index.md) -* [install-qaic-compute-sdk-from-src](install-qaic-compute-sdk-from-src/index.md) -* [install-rocm](install-rocm/index.md) -* [install-tensorflow-for-c](install-tensorflow-for-c/index.md) -* [install-tensorflow-from-src](install-tensorflow-from-src/index.md) -* [install-tflite-from-src](install-tflite-from-src/index.md) diff --git a/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md b/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md deleted file mode 100644 index 0f04dc149..000000000 --- a/docs/scripts/AI-ML-frameworks/install-qaic-compute-sdk-from-src/index.md +++ /dev/null @@ -1,136 +0,0 @@ -# install-qaic-compute-sdk-from-src -Automatically generated README for this automation recipe: **install-qaic-compute-sdk-from-src** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-qaic-compute-sdk-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk' - 'out':'con', - ... - (other input keys for this script) - ... 
-              })
-
-    if r['return']>0:
-        print (r['error'])
-
-    ```
-
-
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk[variations]"
-    ```
-___
-
-=== "Variations"
-
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-
-    * `_branch.#`
-      - ENV variables:
-        - CM_GIT_CHECKOUT: `#`
-
-    * Group "**installation-mode**"
-
-    * `_debug`
-      - ENV variables:
-        - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `debug`
-    * **`_release`** (default)
-      - ENV variables:
-        - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `release`
-    * `_release-assert`
-      - ENV variables:
-        - CM_QAIC_COMPUTE_SDK_INSTALL_MODE: `release-assert`
-
-    * Group "**repo-source**"
-
-    * `_repo.#`
-      - ENV variables:
-        - CM_GIT_URL: `#`
-    * **`_repo.quic`** (default)
-      - ENV variables:
-        - CM_GIT_URL: `https://github.com/quic/software-kit-for-qualcomm-cloud-ai-100-cc`
-
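-    For example, selecting the debug build mode while keeping the default quic repository could look like:
-
-    ```bash
-    cm run script --tags=get,qaic,from.src,software,compute,compute-sdk,qaic-compute-sdk,sdk,_debug
-    ```
-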
- - - ##### Default variations - - `_release,_repo.quic` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-qaic-compute-sdk-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get qaic from.src software compute compute-sdk qaic-compute-sdk sdk [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/install-rocm/index.md b/docs/scripts/AI-ML-frameworks/install-rocm/index.md deleted file mode 100644 index 74756c74d..000000000 --- a/docs/scripts/AI-ML-frameworks/install-rocm/index.md +++ /dev/null @@ -1,91 +0,0 @@ -# install-rocm -Automatically generated README for this automation recipe: **install-rocm** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install rocm install-rocm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,rocm,install-rocm - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install rocm install-rocm " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,rocm,install-rocm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install rocm install-rocm" - ``` -___ - -#### Versions -Default version: `5.7.1` - - -#### Native script being run -=== "Linux/macOS" - * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run-rhel.sh) - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run-ubuntu.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-rocm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install rocm install-rocm " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md b/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md deleted file mode 100644 index 0e1a158ea..000000000 --- a/docs/scripts/AI-ML-frameworks/install-tensorflow-for-c/index.md +++ /dev/null @@ -1,89 +0,0 @@ -# install-tensorflow-for-c -Automatically generated README for this automation recipe: **install-tensorflow-for-c** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-for-c/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install tensorflow lib lang-c" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,tensorflow,lib,lang-c - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install tensorflow lib lang-c " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,tensorflow,lib,lang-c' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install tensorflow lib lang-c" - ``` -___ - -#### Versions -Default version: `2.8.0` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-for-c/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install tensorflow lib lang-c " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md b/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md deleted file mode 100644 index 36610c140..000000000 --- a/docs/scripts/AI-ML-frameworks/install-tensorflow-from-src/index.md +++ /dev/null @@ -1,135 +0,0 @@ -# install-tensorflow-from-src -Automatically generated README for this automation recipe: **install-tensorflow-from-src** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get install tensorflow lib source from-source from-src src from.src" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get install tensorflow lib source from-source from-src src from.src [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,install,tensorflow,lib,source,from-source,from-src,src,from.src' - 'out':'con', - ... - (other input keys for this script) - ... 
-              })
-
-    if r['return']>0:
-        print (r['error'])
-
-    ```
-
-
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get install tensorflow lib source from-source from-src src from.src[variations]"
-    ```
-___
-
-=== "Variations"
-
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-
-    * `_tflite`
-      - ENV variables:
-        - CM_TFLITE: `on`
-
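-    As an illustration, building the TFLite flavour instead of full TensorFlow could look like:
-
-    ```bash
-    cm run script --tags=get,install,tensorflow,lib,source,from-source,from-src,src,from.src,_tflite
-    ```
-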
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_URL: `https://github.com/tensorflow/tensorflow` - * CM_GIT_DEPTH: `1` - * CM_TFLITE: `off` - - -#### Versions -Default version: `master` - -* `master` -* `v1.15.0` -* `v2.0.0` -* `v2.1.0` -* `v2.10.0` -* `v2.11.0` -* `v2.12.0` -* `v2.13.0` -* `v2.14.0` -* `v2.15.0` -* `v2.16.1` -* `v2.2.0` -* `v2.3.0` -* `v2.4.0` -* `v2.5.0` -* `v2.6.0` -* `v2.7.0` -* `v2.8.0` -* `v2.9.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tensorflow-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get install tensorflow lib source from-source from-src src from.src [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md b/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md deleted file mode 100644 index f86c93efb..000000000 --- a/docs/scripts/AI-ML-frameworks/install-tflite-from-src/index.md +++ /dev/null @@ -1,100 +0,0 @@ -# install-tflite-from-src -Automatically generated README for this automation recipe: **install-tflite-from-src** - -Category: **[AI/ML frameworks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tflite-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get install tflite-cmake tensorflow-lite-cmake from-src" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,install,tflite-cmake,tensorflow-lite-cmake,from-src - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get install tflite-cmake tensorflow-lite-cmake from-src " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,install,tflite-cmake,tensorflow-lite-cmake,from-src' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get install tflite-cmake tensorflow-lite-cmake from-src" - ``` -___ - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_GIT_DEPTH: `1` - - -#### Versions -Default version: `master` - -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tflite-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get install tflite-cmake tensorflow-lite-cmake from-src " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md b/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md deleted file mode 100644 index 0cf4982de..000000000 --- a/docs/scripts/AI-ML-models/convert-ml-model-huggingface-to-onnx/index.md +++ /dev/null @@ -1,101 +0,0 @@ -# convert-ml-model-huggingface-to-onnx -Automatically generated README for this automation recipe: **convert-ml-model-huggingface-to-onnx** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-ml-model-huggingface-to-onnx/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "ml-model model huggingface-to-onnx onnx huggingface convert" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'ml-model,model,huggingface-to-onnx,onnx,huggingface,convert' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "ml-model model huggingface-to-onnx onnx huggingface convert[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_model-path.#`
-      - ENV variables:
-        - CM_MODEL_HUGG_PATH: `#`
-
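-    Here `#` stands for a Hugging Face model path supplied by the caller. Assuming the illustrative stub `bert-base-uncased` (not a value fixed by this script), a conversion run could look like:
-
-    ```bash
-    cm run script --tags=ml-model,model,huggingface-to-onnx,onnx,huggingface,convert,_model-path.bert-base-uncased
-    ```
-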
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-ml-model-huggingface-to-onnx/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "ml-model model huggingface-to-onnx onnx huggingface convert [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md b/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md deleted file mode 100644 index ab69223ae..000000000 --- a/docs/scripts/AI-ML-models/get-bert-squad-vocab/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-bert-squad-vocab -Automatically generated README for this automation recipe: **get-bert-squad-vocab** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bert-squad-vocab/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get bert squad bert-large bert-squad vocab" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,bert,squad,bert-large,bert-squad,vocab - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get bert squad bert-large bert-squad vocab " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,bert,squad,bert-large,bert-squad,vocab' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get bert squad bert-large bert-squad vocab" - ``` -___ - - -___ -#### Script output -```bash -cmr "get bert squad bert-large bert-squad vocab " -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-dlrm/index.md b/docs/scripts/AI-ML-models/get-dlrm/index.md deleted file mode 100644 index 4aa9382d1..000000000 --- a/docs/scripts/AI-ML-models/get-dlrm/index.md +++ /dev/null @@ -1,118 +0,0 @@ -# get-dlrm -Automatically generated README for this automation recipe: **get-dlrm** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src dlrm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,dlrm[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src dlrm [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,dlrm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src dlrm[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_full-history`
-      - ENV variables:
-        - CM_GIT_DEPTH: ``
-
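-    Since `_full-history` clears CM_GIT_DEPTH (the default is `--depth 10`, as listed in the environment section below), a full clone of the DLRM sources could look like:
-
-    ```bash
-    cm run script --tags=get,src,dlrm,_full-history
-    ```
-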
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_DEPTH: `--depth 10` - * CM_GIT_PATCH: `no` - * CM_GIT_URL: `https://github.com/facebookresearch/dlrm.git` - - -#### Versions -Default version: `main` - -* `main` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-dlrm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get src dlrm [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md b/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md deleted file mode 100644 index de5fe50eb..000000000 --- a/docs/scripts/AI-ML-models/get-ml-model-3d-unet-kits19/index.md +++ /dev/null @@ -1,130 +0,0 @@ -# get-ml-model-3d-unet-kits19 -Automatically generated README for this automation recipe: **get-ml-model-3d-unet-kits19** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-3d-unet-kits19/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ml-model raw 3d-unet kits19 medical-imaging" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ml-model,raw,3d-unet,kits19,medical-imaging[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ml-model,raw,3d-unet,kits19,medical-imaging' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ml-model raw 3d-unet kits19 medical-imaging[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_weights`
-      - ENV variables:
-        - CM_MODEL_WEIGHTS_FILE: `yes`
-
-    * Group "**framework**"
-
-    * **`_onnx`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `onnx`
-    * `_pytorch`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `pytorch`
-    * `_tf`
-      - Aliases: `_tensorflow`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `tensorflow`
-
-    * Group "**precision**"
-
-    * **`_fp32`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-        - CM_ML_MODEL_PRECISION: `fp32`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-
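-    For example, fetching the PyTorch flavour of the model (one variation per group; free variations can be combined) could look like:
-
-    ```bash
-    cm run script --tags=get,ml-model,raw,3d-unet,kits19,medical-imaging,_pytorch,_fp32
-    ```
-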
- - - ##### Default variations - - `_fp32,_onnx` - -___ -#### Script output -```bash -cmr "get ml-model raw 3d-unet kits19 medical-imaging [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md b/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md deleted file mode 100644 index dc07850d6..000000000 --- a/docs/scripts/AI-ML-models/get-ml-model-bert-base-squad/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# get-ml-model-bert-base-squad -Automatically generated README for this automation recipe: **get-ml-model-bert-base-squad** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-base-squad/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ml-model raw bert bert-base bert-squad language language-processing" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ml-model raw bert bert-base bert-squad language language-processing[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**framework**" -
-
-    * `_deepsparse`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `deepsparse`
-        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
-        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
-        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
-        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
-        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
-
-    * Group "**precision**"
-
-    * **`_fp32`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_PRECISION: `fp32`
-    * `_int8`
-      - ENV variables:
-        - CM_ML_MODEL_PRECISION: `int8`
-        - CM_ML_MODEL_QUANTIZED: `yes`
-
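-    A quantized DeepSparse download, for instance, could look like:
-
-    ```bash
-    cm run script --tags=get,ml-model,raw,bert,bert-base,bert-squad,language,language-processing,_deepsparse,_int8
-    ```
-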
- - - ##### Default variations - - `_fp32` - -___ -#### Script output -```bash -cmr "get ml-model raw bert bert-base bert-squad language language-processing [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md b/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md deleted file mode 100644 index 9ba5778b3..000000000 --- a/docs/scripts/AI-ML-models/get-ml-model-bert-large-squad/index.md +++ /dev/null @@ -1,188 +0,0 @@ -# get-ml-model-bert-large-squad -Automatically generated README for this automation recipe: **get-ml-model-bert-large-squad** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-large-squad/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ml-model raw bert bert-large bert-squad language language-processing" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ml-model raw bert bert-large bert-squad language language-processing[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_onnxruntime`
-    * `_tensorflow`
-
-    * Group "**download-source**"
-
-    * `_amazon-s3`
-    * `_armi`
-    * `_custom-url.#`
-      - ENV variables:
-        - CM_PACKAGE_URL: `#`
-    * `_github`
-    * `_zenodo`
-
-    * Group "**framework**"
-
-    * `_deepsparse`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `deepsparse`
-        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
-        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
-        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
-        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
-        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
-    * **`_onnx`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `onnx`
-        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
-        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
-        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
-        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
-        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
-    * `_pytorch`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `pytorch`
-        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
-        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
-        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
-        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
-        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
-    * `_tf`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `tf`
-        - CM_ML_MODEL_INPUT_IDS_NAME: `input_ids`
-        - CM_ML_MODEL_INPUT_MASK_NAME: `input_mask`
-        - CM_ML_MODEL_INPUT_SEGMENTS_NAME: `segment_ids`
-        - CM_ML_MODEL_OUTPUT_END_LOGITS_NAME: `output_end_logits`
-        - CM_ML_MODEL_OUTPUT_START_LOGITS_NAME: `output_start_logits`
-
-    * Group "**packing**"
-
-    * `_packed`
-      - ENV variables:
-        - CM_ML_MODEL_BERT_PACKED: `yes`
-    * **`_unpacked`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_BERT_PACKED: `no`
-
-    * Group "**precision**"
-
-    * **`_fp32`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_PRECISION: `fp32`
-    * `_int8`
-      - ENV variables:
-        - CM_ML_MODEL_PRECISION: `int8`
-        - CM_ML_MODEL_QUANTIZED: `yes`
-
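-    Combining one variation from each group, a quantized ONNX model fetched from GitHub could, for instance, look like:
-
-    ```bash
-    cm run script --tags=get,ml-model,raw,bert,bert-large,bert-squad,language,language-processing,_github,_onnx,_int8
-    ```
-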
- - - ##### Default variations - - `_fp32,_onnx,_unpacked` - -#### Native script being run -=== "Linux/macOS" - * [run-packed.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-bert-large-squad/run-packed.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get ml-model raw bert bert-large bert-squad language language-processing [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md b/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md deleted file mode 100644 index 71138c9a6..000000000 --- a/docs/scripts/AI-ML-models/get-ml-model-dlrm-terabyte/index.md +++ /dev/null @@ -1,165 +0,0 @@ -# get-ml-model-dlrm-terabyte -Automatically generated README for this automation recipe: **get-ml-model-dlrm-terabyte** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-dlrm-terabyte/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_debug`
-      - ENV variables:
-        - CM_ML_MODEL_DEBUG: `yes`
-
-    * Group "**download-tool**"
-
-    * `_rclone`
-    * `_wget`
-
-    * Group "**framework**"
-
-    * `_onnx`
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `onnx`
-    * **`_pytorch`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_FRAMEWORK: `pytorch`
-        - CM_TMP_MODEL_ADDITIONAL_NAME: `dlrm_terabyte.pytorch`
-
-    * Group "**precision**"
-
-    * **`_fp32`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-        - CM_ML_MODEL_PRECISION: `fp32`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-
-    * Group "**type**"
-
-    * **`_weight_sharded`** (default)
-      - ENV variables:
-        - CM_DLRM_MULTIHOT_MODEL: `yes`
-
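-    Choosing rclone as the download tool and pointing the download at an explicit location via the `--dir` flag documented below (the target path here is illustrative) could look like:
-
-    ```bash
-    cm run script --tags=get,ml-model,dlrm,raw,terabyte,criteo-terabyte,criteo,recommendation,_rclone --dir=$HOME/dlrm_model
-    ```
-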
- - - ##### Default variations - - `_fp32,_pytorch,_weight_sharded` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--dir=value` → `CM_DOWNLOAD_PATH=value` - * `--download_path=value` → `CM_DOWNLOAD_PATH=value` - * `--to=value` → `CM_DOWNLOAD_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-dlrm-terabyte/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get ml-model dlrm raw terabyte criteo-terabyte criteo recommendation [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md b/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md deleted file mode 100644 index b95cc653f..000000000 --- a/docs/scripts/AI-ML-models/get-ml-model-efficientnet-lite/index.md +++ /dev/null @@ -1,191 +0,0 @@ -# get-ml-model-efficientnet-lite -Automatically generated README for this automation recipe: **get-ml-model-efficientnet-lite** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-efficientnet-lite/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_tflite`
-
-    * Group "**kind**"
-
-    * **`_lite0`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite0`
-    * `_lite1`
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite1`
-    * `_lite2`
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite2`
-    * `_lite3`
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite3`
-    * `_lite4`
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_KIND: `lite4`
-
-    * Group "**precision**"
-
-    * **`_fp32`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: `fp32`
-        - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-        - CM_ML_MODEL_PRECISION: `fp32`
-        - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-    * `_uint8`
-      - Aliases: `_int8`
-      - ENV variables:
-        - CM_ML_MODEL_EFFICIENTNET_LITE_PRECISION: `int8`
-        - CM_ML_MODEL_INPUTS_DATA_TYPE: `uint8`
-        - CM_ML_MODEL_PRECISION: `uint8`
-        - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `uint8`
-
-    * Group "**resolution**"
-
-    * **`_resolution-224`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_IMAGE_HEIGHT: `224`
-        - CM_ML_MODEL_IMAGE_WIDTH: `224`
-        - CM_ML_MODEL_MOBILENET_RESOLUTION: `224`
-        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.224`
-    * `_resolution-240`
-      - ENV variables:
-        - CM_ML_MODEL_IMAGE_HEIGHT: `240`
-        - CM_ML_MODEL_IMAGE_WIDTH: `240`
-        - CM_ML_MODEL_MOBILENET_RESOLUTION: `240`
-        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.240`
-    * `_resolution-260`
-      - ENV variables:
-        - CM_ML_MODEL_IMAGE_HEIGHT: `260`
-        - CM_ML_MODEL_IMAGE_WIDTH: `260`
-        - CM_ML_MODEL_MOBILENET_RESOLUTION: `260`
-        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.260`
-    * `_resolution-280`
-      - ENV variables:
-        - CM_ML_MODEL_IMAGE_HEIGHT: `280`
-        - CM_ML_MODEL_IMAGE_WIDTH: `280`
-        - CM_ML_MODEL_MOBILENET_RESOLUTION: `280`
-        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.280`
-    * `_resolution-300`
-      - ENV variables:
-        - CM_ML_MODEL_IMAGE_HEIGHT: `300`
-        - CM_ML_MODEL_IMAGE_WIDTH: `300`
-        - CM_ML_MODEL_MOBILENET_RESOLUTION: `300`
-        - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.300`
-
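-    For example, a quantized EfficientNet-Lite2 at a 260x260 input resolution could be fetched with:
-
-    ```bash
-    cm run script --tags=get,ml-model,efficientnet,raw,ml-model-efficientnet,ml-model-efficientnet-lite,lite,tflite,image-classification,_lite2,_uint8,_resolution-260
-    ```
-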
- - - ##### Default variations - - `_fp32,_lite0,_resolution-224` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32` - * CM_ML_MODEL_PRECISION: `fp32` - * CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32` - - - -___ -#### Script output -```bash -cmr "get ml-model efficientnet raw ml-model-efficientnet ml-model-efficientnet-lite lite tflite image-classification [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md b/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md deleted file mode 100644 index f8ba684b1..000000000 --- a/docs/scripts/AI-ML-models/get-ml-model-gptj/index.md +++ /dev/null @@ -1,194 +0,0 @@ -# get-ml-model-gptj -Automatically generated README for this automation recipe: **get-ml-model-gptj** - -Category: **[AI/ML models](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get raw ml-model gptj gpt-j large-language-model" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,raw,ml-model,gptj,gpt-j,large-language-model' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get raw ml-model gptj gpt-j large-language-model[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-    * `_batch_size.#`
-      - ENV variables:
-        - CM_ML_MODEL_BATCH_SIZE: `#`
-
-    * Group "**download-tool**"
-
-    * **`_rclone`** (default)
-      - ENV variables:
-        - CM_DOWNLOAD_FILENAME: `checkpoint`
-        - CM_DOWNLOAD_URL: `<<>>`
-    * `_wget`
-      - ENV variables:
-        - CM_DOWNLOAD_URL: `<<>>`
-        - CM_DOWNLOAD_FILENAME: `checkpoint.zip`
-
-    * Group "**framework**"
-
-    * **`_pytorch`** (default)
-      - ENV variables:
-        - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
-        - CM_ML_MODEL_FRAMEWORK: `pytorch`
-        - CM_ML_STARTING_WEIGHTS_FILENAME: `<<>>`
-    * `_saxml`
-
-    * Group "**model-provider**"
-
-    * `_intel`
-    * **`_mlcommons`** (default)
-    * `_nvidia`
-      - ENV variables:
-        - CM_TMP_ML_MODEL_PROVIDER: `nvidia`
-
-    * Group "**precision**"
-
-    * `_fp32`
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-        - CM_ML_MODEL_PRECISION: `fp32`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-    * `_fp8`
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `fp8`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp8`
-    * `_int4`
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `int4`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int4`
-    * `_int8`
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
-        - CM_ML_MODEL_PRECISION: `int8`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
-    * `_uint8`
-      - ENV variables:
-        - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
-        - CM_ML_MODEL_PRECISION: `uint8`
-        - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
-
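-    For instance, fetching the fp32 PyTorch checkpoint to an explicit location via the `--to` flag documented below (the path is illustrative) could look like:
-
-    ```bash
-    cm run script --tags=get,raw,ml-model,gptj,gpt-j,large-language-model,_pytorch,_fp32 --to=$HOME/gptj_checkpoint
-    ```
-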
-
-    ##### Default variations
-
-    `_mlcommons,_pytorch,_rclone`
-=== "Input Flag Mapping"
-
-    #### Script flags mapped to environment
-
-    * `--checkpoint=value` → `GPTJ_CHECKPOINT_PATH=value`
-    * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
-    * `--to=value` → `CM_DOWNLOAD_PATH=value`
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run-int4-calibration.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-int4-calibration.sh)
-    * [run-intel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-intel.sh)
-    * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-nvidia.sh)
-    * [run-saxml-quantized.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-saxml-quantized.sh)
-    * [run-saxml.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-gptj/run-saxml.sh)
-=== "Windows"
-
-    No run file exists for Windows
-___
-#### Script output
-```bash
-cmr "get raw ml-model gptj gpt-j large-language-model [variations]" [--input_flags] -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md b/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md
deleted file mode 100644
index 5f5ef67fe..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-huggingface-zoo/index.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# get-ml-model-huggingface-zoo
-Automatically generated README for this automation recipe: **get-ml-model-huggingface-zoo**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/README-extra.md)
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model huggingface zoo" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,huggingface,zoo[,variations] [--input_flags]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model huggingface zoo [variations]" [--input_flags]
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,huggingface,zoo',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model huggingface zoo[variations]" [--input_flags]
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_model-stub.#`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `#`
-        * `_onnx-subfolder`
-            - ENV variables:
-                - CM_HF_SUBFOLDER: `onnx`
-        * `_pierreguillou_bert_base_cased_squad_v1.1_portuguese`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `pierreguillou/bert-base-cased-squad-v1.1-portuguese`
-        * `_prune`
-            - ENV variables:
-                - CM_MODEL_TASK: `prune`
-
-    * Group "**download-type**"
-        * `_clone-repo`
-            - ENV variables:
-                - CM_GIT_CLONE_REPO: `yes`
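For example, the `_model-stub.#` variation takes an arbitrary Hugging Face stub in place of `#`. A minimal sketch (the stub and filename below are hypothetical examples, not documented presets):

```bash
# Download one file of a model chosen by its Hugging Face stub;
# --model_filename maps to CM_MODEL_ZOO_FILENAME as documented above.
cmr "get ml-model huggingface zoo _model-stub.bert-base-uncased" --model_filename=model.onnx -j
```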
-
-=== "Input Flag Mapping"
-
-    #### Script flags mapped to environment
-
-    * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
-    * `--env_key=value` → `CM_MODEL_ZOO_ENV_KEY=value`
-    * `--full_subfolder=value` → `CM_HF_FULL_SUBFOLDER=value`
-    * `--model_filename=value` → `CM_MODEL_ZOO_FILENAME=value`
-    * `--revision=value` → `CM_HF_REVISION=value`
-    * `--subfolder=value` → `CM_HF_SUBFOLDER=value`
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/run.sh)
-=== "Windows"
-
-    * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-huggingface-zoo/run.bat)
-___
-#### Script output
-```bash
-cmr "get ml-model huggingface zoo [variations]" [--input_flags] -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md b/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md
deleted file mode 100644
index fe9e5136a..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-llama2/index.md
+++ /dev/null
@@ -1,161 +0,0 @@
-# get-ml-model-llama2
-Automatically generated README for this automation recipe: **get-ml-model-llama2**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-llama2/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization[,variations] [--input_flags]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags]
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,raw,ml-model,language-processing,llama2,llama2-70b,text-summarization',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get raw ml-model language-processing llama2 llama2-70b text-summarization[variations]" [--input_flags]
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_batch_size.#`
-            - ENV variables:
-                - CM_ML_MODEL_BATCH_SIZE: `#`
-
-    * Group "**framework**"
-        * **`_pytorch`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_FRAMEWORK: `pytorch`
-
-    * Group "**huggingface-stub**"
-        * **`_meta-llama/Llama-2-70b-chat-hf`** (default)
-            - ENV variables:
-                - CM_GIT_CHECKOUT_FOLDER: `Llama-2-70b-chat-hf`
-                - CM_MODEL_ZOO_ENV_KEY: `LLAMA2`
-        * `_meta-llama/Llama-2-7b-chat-hf`
-            - ENV variables:
-                - CM_GIT_CHECKOUT_FOLDER: `Llama-2-7b-chat-hf`
-                - CM_MODEL_ZOO_ENV_KEY: `LLAMA2`
-        * `_stub.#`
-            - ENV variables:
-                - CM_MODEL_ZOO_ENV_KEY: `LLAMA2`
-
-    * Group "**precision**"
-        * **`_fp32`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-        * `_int8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
-                - CM_ML_MODEL_PRECISION: `int8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
-        * `_uint8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
-                - CM_ML_MODEL_PRECISION: `uint8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
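For instance, the smaller 7B chat checkpoint can be selected instead of the default 70B one. A sketch using only variations documented above (note that gated Hugging Face repositories may additionally require authentication, which this README does not cover):

```bash
# Fetch Llama-2-7b-chat-hf in the default PyTorch/fp32 form.
cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization _meta-llama/Llama-2-7b-chat-hf" -j
```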
-
-    ##### Default variations
-
-    `_fp32,_meta-llama/Llama-2-70b-chat-hf,_pytorch`
-=== "Input Flag Mapping"
-
-    #### Script flags mapped to environment
-
-    * `--checkpoint=value` → `LLAMA2_CHECKPOINT_PATH=value`
-
-___
-#### Script output
-```bash
-cmr "get raw ml-model language-processing llama2 llama2-70b text-summarization [variations]" [--input_flags] -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md b/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md
deleted file mode 100644
index e34f128e8..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-mobilenet/index.md
+++ /dev/null
@@ -1,288 +0,0 @@
-# get-ml-model-mobilenet
-Automatically generated README for this automation recipe: **get-ml-model-mobilenet**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-mobilenet/README-extra.md)
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-mobilenet/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,mobilenet,raw,ml-model-mobilenet,image-classification',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model mobilenet raw ml-model-mobilenet image-classification[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_tflite`
-
-    * Group "**framework**"
-        * `_onnx`
-            - ENV variables:
-                - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
-                - CM_ML_MODEL_FRAMEWORK: `onnx`
-        * **`_tf`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
-                - CM_ML_MODEL_NORMALIZE_DATA: `yes`
-                - CM_ML_MODEL_SUBTRACT_MEANS: `no`
-                - CM_ML_MODEL_INPUT_LAYER_NAME: `input`
-
-    * Group "**kind**"
-        * `_large`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_KIND: `large`
-        * `_large-minimalistic`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_KIND: `large-minimalistic`
-        * `_small`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_KIND: `small`
-        * `_small-minimalistic`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_KIND: `small-minimalistic`
-
-    * Group "**multiplier**"
-        * `_multiplier-0.25`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.25`
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `25`
-        * `_multiplier-0.35`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.35`
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `35`
-        * `_multiplier-0.5`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.5`
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `50`
-        * `_multiplier-0.75`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER: `0.75`
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `75`
-        * `_multiplier-1.0`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER: `1.0`
-                - CM_ML_MODEL_MOBILENET_MULTIPLIER_PERCENTAGE: `100`
-
-    * Group "**opset-version**"
-        * `_opset-11`
-            - ENV variables:
-                - CM_ML_MODEL_ONNX_OPSET: `11`
-        * `_opset-8`
-            - ENV variables:
-                - CM_ML_MODEL_ONNX_OPSET: `8`
-
-    * Group "**precision**"
-        * **`_fp32`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_MOBILENET_PRECISION: `float`
-        * `_int8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_PRECISION: `int8`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_MOBILENET_PRECISION: `int8`
-        * `_uint8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `uint8`
-                - CM_ML_MODEL_PRECISION: `uint8`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `uint8`
-                - CM_ML_MODEL_MOBILENET_PRECISION: `uint8`
-
-    * Group "**resolution**"
-        * `_resolution-128`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_RESOLUTION: `128`
-                - CM_ML_MODEL_IMAGE_HEIGHT: `128`
-                - CM_ML_MODEL_IMAGE_WIDTH: `128`
-                - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.128`
-        * `_resolution-160`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_RESOLUTION: `160`
-                - CM_ML_MODEL_IMAGE_HEIGHT: `160`
-                - CM_ML_MODEL_IMAGE_WIDTH: `160`
-                - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.160`
-        * `_resolution-192`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_RESOLUTION: `192`
-                - CM_ML_MODEL_IMAGE_HEIGHT: `192`
-                - CM_ML_MODEL_IMAGE_WIDTH: `192`
-                - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.192`
-        * `_resolution-224`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_RESOLUTION: `224`
-                - CM_ML_MODEL_IMAGE_HEIGHT: `224`
-                - CM_ML_MODEL_IMAGE_WIDTH: `224`
-                - CM_DATASET_PREPROCESSED_IMAGENET_DEP_TAGS: `_resolution.224`
-
-    * Group "**source**"
-        * `_from.google`
-            - ENV variables:
-                - CM_DOWNLOAD_SOURCE: `google`
-        * `_from.zenodo`
-            - ENV variables:
-                - CM_DOWNLOAD_SOURCE: `zenodo`
-
-    * Group "**version**"
-        * `_v1`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_VERSION: `1`
-                - CM_ML_MODEL_FULL_NAME: `mobilenet-v1-precision_<<>>-<<>>-<<>>`
-        * `_v2`
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_VERSION: `2`
-                - CM_ML_MODEL_VER: `2`
-                - CM_ML_MODEL_FULL_NAME: `mobilenet-v2-precision_<<>>-<<>>-<<>>`
-        * **`_v3`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_MOBILENET_VERSION: `3`
-                - CM_ML_MODEL_VER: `3`
-                - CM_ML_MODEL_FULL_NAME: `mobilenet-v3-precision_<<>>-<<>>-<<>>`
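Variations from different groups compose on the command line. A sketch of one such combination (illustrative only; whether every cross-group combination is supported is determined by the script's meta, not by this README):

```bash
# A MobileNet-v2 at 224x224 with width multiplier 1.0, as a TFLite file.
cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification _v2,_tflite,_resolution-224,_multiplier-1.0" -j
```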
-
-    ##### Default variations
-
-    `_fp32,_tf,_v3`
-=== "Default environment"
-
-    #### Default environment
-
-    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
-
-    * CM_ML_MODEL: `mobilenet`
-    * CM_ML_MODEL_DATASET: `imagenet2012-val`
-    * CM_ML_MODEL_RETRAINING: `no`
-    * CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `no`
-    * CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-    * CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-    * CM_ML_MODEL_MOBILENET_NAME_SUFFIX: ``
-
-___
-#### Script output
-```bash
-cmr "get ml-model mobilenet raw ml-model-mobilenet image-classification [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md b/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md
deleted file mode 100644
index ddbfc6af0..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-neuralmagic-zoo/index.md
+++ /dev/null
@@ -1,271 +0,0 @@
-# get-ml-model-neuralmagic-zoo
-Automatically generated README for this automation recipe: **get-ml-model-neuralmagic-zoo**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,model,zoo,deepsparse,model-zoo,sparse-zoo,neuralmagic,neural-magic',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_bert-base-pruned90-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned90-none`
-                - CM_ML_MODEL_FULL_NAME: `bert-base-pruned90-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_bert-base-pruned95_obs_quant-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base/pytorch/huggingface/squad/pruned95_obs_quant-none`
-                - CM_ML_MODEL_FULL_NAME: `bert-base-pruned95_obs_quant-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `yes`
-        * `_bert-base_cased-pruned90-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-base_cased/pytorch/huggingface/squad/pruned90-none`
-                - CM_ML_MODEL_FULL_NAME: `bert-base_cased-pruned90-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-base-cased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_bert-large-base-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/base-none`
-                - CM_ML_MODEL_FULL_NAME: `bert-large-base-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_bert-large-pruned80_quant-none-vnni`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/bert-large/pytorch/huggingface/squad/pruned80_quant-none-vnni`
-                - CM_ML_MODEL_FULL_NAME: `bert-large-pruned80_quant-none-vnni-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_mobilebert-14layer_pruned50-none-vnni`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50-none-vnni`
-                - CM_ML_MODEL_FULL_NAME: `mobilebert-14layer_pruned50-none-vnni-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_mobilebert-14layer_pruned50_quant-none-vnni`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/14layer_pruned50_quant-none-vnni`
-                - CM_ML_MODEL_FULL_NAME: `mobilebert-14layer_pruned50_quant-none-vnni-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `yes`
-        * `_mobilebert-base_quant-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none`
-                - CM_ML_MODEL_FULL_NAME: `mobilebert-base_quant-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `yes`
-        * `_mobilebert-none-base-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base-none`
-                - CM_ML_MODEL_FULL_NAME: `mobilebert-none-base-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://storage.googleapis.com/cloud-tpu-checkpoints/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT.tar.gz`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_model-stub.#`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `#`
-        * `_obert-base-pruned90-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-base/pytorch/huggingface/squad/pruned90-none`
-                - CM_ML_MODEL_FULL_NAME: `obert-base-pruned90-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_obert-large-base-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/base-none`
-                - CM_ML_MODEL_FULL_NAME: `obert-large-base-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_obert-large-pruned95-none-vnni`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95-none-vnni`
-                - CM_ML_MODEL_FULL_NAME: `obert-large-pruned95-none-vnni-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_obert-large-pruned95_quant-none-vnni`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned95_quant-none-vnni`
-                - CM_ML_MODEL_FULL_NAME: `obert-large-pruned95_quant-none-vnni-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `yes`
-        * `_obert-large-pruned97-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97-none`
-                - CM_ML_MODEL_FULL_NAME: `obert-large-pruned97-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_obert-large-pruned97-quant-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/obert-large/pytorch/huggingface/squad/pruned97_quant-none`
-                - CM_ML_MODEL_FULL_NAME: `obert-large-pruned97-quant-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/bert-large-uncased`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_oberta-base-pruned90-quant-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/oberta-base/pytorch/huggingface/squad/pruned90_quant-none`
-                - CM_ML_MODEL_FULL_NAME: `oberta-base-pruned90-quant-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/roberta-base`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `no`
-        * `_roberta-base-pruned85-quant-none`
-            - Aliases: `_model-stub.zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none`
-            - ENV variables:
-                - CM_MODEL_ZOO_STUB: `zoo:nlp/question_answering/roberta-base/pytorch/huggingface/squad/pruned85_quant-none`
-                - CM_ML_MODEL_FULL_NAME: `roberta-base-pruned85-quant-none-bert-99`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://huggingface.co/roberta-base`
-                - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, unstructured pruning`
-                - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
-                - CM_ML_MODEL_INPUTS_DATA_TYPE: `int64`
-                - CM_ML_MODEL_RETRAINING: `no`
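Each preset above is shorthand for the `_model-stub.<zoo stub>` alias it lists. A minimal sketch selecting one of the documented presets:

```bash
# Pull the quantized 14-layer pruned MobileBERT checkpoint from SparseZoo.
cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic _mobilebert-14layer_pruned50_quant-none-vnni" -j
```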
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/run.sh)
-=== "Windows"
-
-    * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-neuralmagic-zoo/run.bat)
-___
-#### Script output
-```bash
-cmr "get ml-model model zoo deepsparse model-zoo sparse-zoo neuralmagic neural-magic [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md b/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md
deleted file mode 100644
index 0f2ff13a7..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-resnet50/index.md
+++ /dev/null
@@ -1,228 +0,0 @@
-# get-ml-model-resnet50
-Automatically generated README for this automation recipe: **get-ml-model-resnet50**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/README-extra.md)
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,raw,ml-model,resnet50,ml-model-resnet50,image-classification[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,raw,ml-model,resnet50,ml-model-resnet50,image-classification',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get raw ml-model resnet50 ml-model-resnet50 image-classification[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_batch_size.#`
-            - ENV variables:
-                - CM_ML_MODEL_BATCH_SIZE: `#`
-        * `_batch_size.1`
-            - ENV variables:
-                - CM_ML_MODEL_BATCH_SIZE: `1`
-        * `_fix-input-shape`
-        * `_from-tf`
-        * `_huggingface_default`
-            - ENV variables:
-                - CM_PACKAGE_URL: `https://huggingface.co/ctuning/mlperf-inference-resnet50-onnx-fp32-imagenet2012-v1.0/resolve/main/resnet50_v1.onnx`
-
-    * Group "**framework**"
-        * `_ncnn`
-            - ENV variables:
-                - CM_ML_MODEL_FRAMEWORK: `ncnn`
-        * **`_onnx`** (default)
-            - Aliases: `_onnxruntime`
-            - ENV variables:
-                - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
-                - CM_ML_MODEL_FRAMEWORK: `onnx`
-                - CM_ML_MODEL_INPUT_LAYERS: `input_tensor:0`
-                - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor:0`
-                - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)`
-                - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor:0`
-                - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor:0`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
-                - CM_ML_MODEL_VER: `1.5`
-        * `_pytorch`
-            - ENV variables:
-                - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
-                - CM_ML_MODEL_FRAMEWORK: `pytorch`
-                - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `?`
-                - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor:0`
-                - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": [BATCH_SIZE, 3, 224, 224]`
-                - CM_ML_MODEL_OUTPUT_LAYERS: `output`
-                - CM_ML_MODEL_OUTPUT_LAYER_NAME: `?`
-                - CM_ML_STARTING_WEIGHTS_FILENAME: `<<>>`
-        * `_tensorflow`
-            - Aliases: `_tf`
-            - ENV variables:
-                - CM_ML_MODEL_ACCURACY: `76.456`
-                - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
-                - CM_ML_MODEL_FRAMEWORK: `tensorflow`
-                - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
-                - CM_ML_MODEL_INPUT_LAYERS: `input_tensor`
-                - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor`
-                - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor:0\": (BATCH_SIZE, 3, 224, 224)`
-                - CM_ML_MODEL_NORMALIZE_DATA: `0`
-                - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor`
-                - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
-                - CM_ML_MODEL_SUBTRACT_MEANS: `YES`
-                - CM_PACKAGE_URL: `https://zenodo.org/record/2535873/files/resnet50_v1.pb`
-        * `_tflite`
-            - ENV variables:
-                - CM_ML_MODEL_ACCURACY: `76.456`
-                - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
-                - CM_ML_MODEL_FRAMEWORK: `tflite`
-                - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94`
-                - CM_ML_MODEL_INPUT_LAYERS: `input_tensor`
-                - CM_ML_MODEL_INPUT_LAYER_NAME: `input_tensor`
-                - CM_ML_MODEL_INPUT_SHAPES: `\"input_tensor 2\": (BATCH_SIZE, 224, 224, 3)`
-                - CM_ML_MODEL_NORMALIZE_DATA: `0`
-                - CM_ML_MODEL_OUTPUT_LAYERS: `softmax_tensor`
-                - CM_ML_MODEL_OUTPUT_LAYER_NAME: `softmax_tensor`
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
-                - CM_ML_MODEL_SUBTRACT_MEANS: `YES`
-
-    * Group "**model-output**"
-        * **`_argmax`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: `yes`
-        * `_no-argmax`
-            - ENV variables:
-                - CM_ML_MODEL_OUTPUT_LAYER_ARGMAX: `no`
-
-    * Group "**opset-version**"
-        * `_opset-11`
-            - ENV variables:
-                - CM_ML_MODEL_ONNX_OPSET: `11`
-        * `_opset-8`
-            - ENV variables:
-                - CM_ML_MODEL_ONNX_OPSET: `8`
-
-    * Group "**precision**"
-        * **`_fp32`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-        * `_int8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
-                - CM_ML_MODEL_PRECISION: `int8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
-        * `_uint8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
-                - CM_ML_MODEL_PRECISION: `uint8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
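As one illustration of the groups above, the TensorFlow frozen graph can be fetched without the argmax output layer. A sketch (variation names as documented; the pairing itself is an assumption):

```bash
# TensorFlow ResNet50 v1.5 without the argmax layer appended to the output.
cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification _tensorflow,_no-argmax" -j
```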
-
-    ##### Default variations
-
-    `_argmax,_fp32,_onnx`
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run-fix-input.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-resnet50/run-fix-input.sh)
-=== "Windows"
-
-    No run file exists for Windows
-___
-#### Script output
-```bash
-cmr "get raw ml-model resnet50 ml-model-resnet50 image-classification [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md b/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md
deleted file mode 100644
index aa0894064..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-retinanet-nvidia/index.md
+++ /dev/null
@@ -1,111 +0,0 @@
-# get-ml-model-retinanet-nvidia
-Automatically generated README for this automation recipe: **get-ml-model-retinanet-nvidia**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet-nvidia/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model nvidia-retinanet nvidia" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,nvidia-retinanet,nvidia[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model nvidia-retinanet nvidia [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,nvidia-retinanet,nvidia',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })

-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model nvidia-retinanet nvidia[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_efficient-nms`
-            - ENV variables:
-                - CM_NVIDIA_EFFICIENT_NMS: `yes`
-
-=== "Default environment"
-
-    #### Default environment
-
-    These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
-
-    * CM_TORCH_DEVICE: `cpu`
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet-nvidia/run.sh)
-=== "Windows"
-
-    No run file exists for Windows
-___
-#### Script output
-```bash
-cmr "get ml-model nvidia-retinanet nvidia [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md b/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md
deleted file mode 100644
index db0a15981..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-retinanet/index.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# get-ml-model-retinanet
-Automatically generated README for this automation recipe: **get-ml-model-retinanet**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/README-extra.md)
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model raw resnext50 retinanet object-detection" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,raw,resnext50,retinanet,object-detection[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model raw resnext50 retinanet object-detection [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,raw,resnext50,retinanet,object-detection',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model raw resnext50 retinanet object-detection[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_no-nms`
-            - ENV variables:
-                - CM_TMP_ML_MODEL_RETINANET_NO_NMS: `yes`
-                - CM_ML_MODEL_RETINANET_NO_NMS: `yes`
-                - CM_QAIC_PRINT_NODE_PRECISION_INFO: `yes`
-        * `_weights`
-            - ENV variables:
-                - CM_MODEL_WEIGHTS_FILE: `yes`
-
-    * Group "**framework**"
-        * **`_onnx`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
-                - CM_ML_MODEL_FRAMEWORK: `onnx`
-        * `_pytorch`
-            - ENV variables:
-                - CM_ML_MODEL_DATA_LAYOUT: `NCHW`
-                - CM_ML_MODEL_FRAMEWORK: `pytorch`
-
-    * Group "**precision**"
-        * **`_fp32`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
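A sketch combining the variations above (illustrative; both variations are documented, their pairing is an assumption):

```bash
# PyTorch RetinaNet as a weights file rather than the default ONNX export.
cmr "get ml-model raw resnext50 retinanet object-detection _pytorch,_weights" -j
```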
-
-    ##### Default variations
-
-    `_fp32,_onnx`
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run-no-nms.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-retinanet/run-no-nms.sh)
-=== "Windows"
-
-    No run file exists for Windows
-___
-#### Script output
-```bash
-cmr "get ml-model raw resnext50 retinanet object-detection [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md b/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md
deleted file mode 100644
index 82a0da040..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-rnnt/index.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# get-ml-model-rnnt
-Automatically generated README for this automation recipe: **get-ml-model-rnnt**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-rnnt/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model rnnt raw librispeech speech-recognition" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,rnnt,raw,librispeech,speech-recognition[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model rnnt raw librispeech speech-recognition [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,rnnt,raw,librispeech,speech-recognition',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model rnnt raw librispeech speech-recognition[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_weights`
-            - ENV variables:
-                - CM_MODEL_WEIGHTS_FILE: `yes`
-
-    * Group "**download-src**"
-        * `_amazon-s3`
-        * **`_zenodo`** (default)
-
-    * Group "**framework**"
-        * **`_pytorch`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_FRAMEWORK: `pytorch`
-
-    * Group "**precision**"
-        * **`_fp32`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
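The download source can be switched independently of the defaults, as in this sketch (only `_amazon-s3` differs from the documented defaults):

```bash
# Default PyTorch fp32 RNN-T checkpoint, fetched from the Amazon S3 mirror
# instead of Zenodo.
cmr "get ml-model rnnt raw librispeech speech-recognition _amazon-s3" -j
```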
-
-    ##### Default variations
-
-    `_fp32,_pytorch,_zenodo`
-
-___
-#### Script output
-```bash
-cmr "get ml-model rnnt raw librispeech speech-recognition [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md b/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md
deleted file mode 100644
index e488ff105..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-stable-diffusion/index.md
+++ /dev/null
@@ -1,177 +0,0 @@
-# get-ml-model-stable-diffusion
-Automatically generated README for this automation recipe: **get-ml-model-stable-diffusion**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-stable-diffusion/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get raw ml-model stable-diffusion sdxl text-to-image" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,raw,ml-model,stable-diffusion,sdxl,text-to-image[,variations] [--input_flags]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags]
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,raw,ml-model,stable-diffusion,sdxl,text-to-image',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get raw ml-model stable-diffusion sdxl text-to-image[variations]" [--input_flags]
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_batch_size.#`
-            - ENV variables:
-                - CM_ML_MODEL_BATCH_SIZE: `#`
-
-    * Group "**download-source**"
-        * `_huggingface`
-        * **`_mlcommons`** (default)
-
-    * Group "**download-tool**"
-        * `_git`
-            - ENV variables:
-                - CM_DOWNLOAD_TOOL: `git`
-        * `_rclone`
-            - ENV variables:
-                - CM_RCLONE_CONFIG_CMD: `rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com`
-                - CM_DOWNLOAD_TOOL: `rclone`
-        * `_wget`
-            - ENV variables:
-                - CM_DOWNLOAD_TOOL: `wget`
-
-    * Group "**framework**"
-        * **`_pytorch`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_FRAMEWORK: `pytorch`
-
-    * Group "**precision**"
-        * `_fp16`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp16`
-                - CM_ML_MODEL_PRECISION: `fp16`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp16`
-        * **`_fp32`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-        * `_int8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
-                - CM_ML_MODEL_PRECISION: `int8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
-        * `_uint8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
-                - CM_ML_MODEL_PRECISION: `uint8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
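A sketch combining a precision and download-tool choice with an explicit target directory (the path is a placeholder; `--to` maps to `CM_DOWNLOAD_PATH` as listed below):

```bash
# fp16 SDXL weights fetched with git instead of the default rclone.
cmr "get raw ml-model stable-diffusion sdxl text-to-image _fp16,_git" --to=/data/models/sdxl -j
```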
-
-    ##### Default variations
-
-    `_fp32,_mlcommons,_pytorch`
-=== "Input Flag Mapping"
-
-    #### Script flags mapped to environment
-
-    * `--checkpoint=value` → `SDXL_CHECKPOINT_PATH=value`
-    * `--download_path=value` → `CM_DOWNLOAD_PATH=value`
-    * `--to=value` → `CM_DOWNLOAD_PATH=value`
-
-___
-#### Script output
-```bash
-cmr "get raw ml-model stable-diffusion sdxl text-to-image [variations]" [--input_flags] -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md b/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md
deleted file mode 100644
index cb4084c9f..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-tiny-resnet/index.md
+++ /dev/null
@@ -1,153 +0,0 @@
-# get-ml-model-tiny-resnet
-Automatically generated README for this automation recipe: **get-ml-model-tiny-resnet**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-tiny-resnet/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,raw,ml-model,resnet,pretrained,tiny,model,ic,ml-model-tiny-resnet,image-classification',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
-        * `_batch_size.#`
-            - ENV variables:
-                - CM_ML_MODEL_BATCH_SIZE: `#`
-
-    * Group "**framework**"
-        * `_onnx`
-            - ENV variables:
-                - CM_TMP_ML_MODEL_TF2ONNX: `yes`
-        * **`_tflite`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_ACCURACY: `85`
-                - CM_ML_MODEL_DATA_LAYOUT: `NHWC`
-                - CM_ML_MODEL_FRAMEWORK: `tflite`
-                - CM_ML_MODEL_GIVEN_CHANNEL_MEANS: ``
-                - CM_ML_MODEL_INPUT_LAYERS: ``
-                - CM_ML_MODEL_INPUT_LAYER_NAME: ``
-                - CM_ML_MODEL_INPUT_SHAPES: ``
-                - CM_ML_MODEL_NORMALIZE_DATA: `0`
-                - CM_ML_MODEL_OUTPUT_LAYERS: ``
-                - CM_ML_MODEL_OUTPUT_LAYER_NAME: ``
-                - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `<<>>`
-                - CM_ML_MODEL_SUBTRACT_MEANS: `YES`
-
-    * Group "**precision**"
-        * `_fp32`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `fp32`
-                - CM_ML_MODEL_PRECISION: `fp32`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `fp32`
-        * **`_int8`** (default)
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `int8`
-                - CM_ML_MODEL_PRECISION: `int8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `int8`
-        * `_uint8`
-            - ENV variables:
-                - CM_ML_MODEL_INPUT_DATA_TYPES: `uint8`
-                - CM_ML_MODEL_PRECISION: `uint8`
-                - CM_ML_MODEL_WEIGHT_DATA_TYPES: `uint8`
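A sketch of requesting the ONNX form of the default int8 model (per `CM_TMP_ML_MODEL_TF2ONNX` above, `_onnx` appears to convert the TFLite model via tf2onnx; treating the pairing as valid is an assumption):

```bash
# ONNX export of the int8 tiny ResNet.
cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification _onnx,_int8" -j
```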
-
-    ##### Default variations
-
-    `_int8,_tflite`
-
-#### Native script being run
-=== "Linux/macOS"
-    * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-tiny-resnet/run.sh)
-=== "Windows"
-
-    No run file exists for Windows
-___
-#### Script output
-```bash
-cmr "get raw ml-model resnet pretrained tiny model ic ml-model-tiny-resnet image-classification [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md b/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md
deleted file mode 100644
index 27bce3765..000000000
--- a/docs/scripts/AI-ML-models/get-ml-model-using-imagenet-from-model-zoo/index.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# get-ml-model-using-imagenet-from-model-zoo
-Automatically generated README for this automation recipe: **get-ml-model-using-imagenet-from-model-zoo**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ml-model-using-imagenet-from-model-zoo/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model model-zoo zoo imagenet image-classification" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model,model-zoo,zoo,imagenet,image-classification[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model model-zoo zoo imagenet image-classification [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model,model-zoo,zoo,imagenet,image-classification',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model model-zoo zoo imagenet image-classification[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * Group "**model-source**"
-        * `_model.#`
-        * `_model.resnet101-pytorch-base`
-        * `_model.resnet50-pruned95-uniform-quant`
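A minimal sketch selecting one of the documented model-source presets:

```bash
# Fetch the pruned, uniform-quantized ResNet50 from the model zoo.
cmr "get ml-model model-zoo zoo imagenet image-classification _model.resnet50-pruned95-uniform-quant" -j
```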
-
-___
-#### Script output
-```bash
-cmr "get ml-model model-zoo zoo imagenet image-classification [variations]" -j
-```
\ No newline at end of file
diff --git a/docs/scripts/AI-ML-models/get-tvm-model/index.md b/docs/scripts/AI-ML-models/get-tvm-model/index.md
deleted file mode 100644
index 4cff76283..000000000
--- a/docs/scripts/AI-ML-models/get-tvm-model/index.md
+++ /dev/null
@@ -1,188 +0,0 @@
-# get-tvm-model
-Automatically generated README for this automation recipe: **get-tvm-model**
-
-Category: **[AI/ML models](..)**
-
-License: **Apache 2.0**
-
-* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/README-extra.md)
-* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/_cm.json)*
-* Output cached? *True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-```cmr "get ml-model-tvm tvm-model" --help```
-
-#### Run this script
-
-=== "CLI"
-    ##### Run this script via CLI
-
-    ```bash
-    cm run script --tags=get,ml-model-tvm,tvm-model[,variations]
-    ```
-=== "CLI Alt"
-    ##### Run this script via CLI (alternative)
-
-    ```bash
-    cmr "get ml-model-tvm tvm-model [variations]"
-    ```
-=== "Python"
-    ##### Run this script from Python
-
-    ```python
-    import cmind
-
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,ml-model-tvm,tvm-model',
-                      'out': 'con',
-                      # ... other input keys for this script ...
-                     })
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```
-=== "Docker"
-    ##### Run this script via Docker (beta)
-
-    ```bash
-    cm docker script "get ml-model-tvm tvm-model[variations]"
-    ```
-___
-
-=== "Variations"
-
-    #### Variations
-
-    * *No group (any combination of variations can be selected)*
- Click here to expand this section. - - * `_tune-model` - - ENV variables: - - CM_TUNE_TVM_MODEL: `yes` - -
- - - * Group "**batchsize**" -
- Click here to expand this section. - - * `_batch_size.#` - - ENV variables: - - CM_ML_MODEL_MAX_BATCH_SIZE: `#` - -
- - - * Group "**frontend**" -
- Click here to expand this section. - - * **`_onnx`** (default) - - ENV variables: - - CM_TVM_FRONTEND_FRAMEWORK: `onnx` - * `_pytorch` - - Aliases: `_torch` - - ENV variables: - - CM_TVM_FRONTEND_FRAMEWORK: `pytorch` - * `_tensorflow` - - Aliases: `_tf` - - ENV variables: - - CM_TVM_FRONTEND_FRAMEWORK: `tensorflow` - * `_tflite` - - ENV variables: - - CM_TVM_FRONTEND_FRAMEWORK: `tflite` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_model.#` - - ENV variables: - - CM_ML_MODEL: `#` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * **`_fp32`** (default) - * `_int8` - * `_uint8` - -
- - - * Group "**runtime**" -
- Click here to expand this section. - - * `_graph_executor` - - ENV variables: - - CM_TVM_USE_VM: `no` - * **`_virtual_machine`** (default) - - ENV variables: - - CM_TVM_USE_VM: `yes` - -
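-    As an illustration, one variation per group can be combined in a single call (a sketch; the suffix after `_model.` is a placeholder for a model name, and the batch size is arbitrary):
-
-    ```bash
-    cm run script --tags=get,ml-model-tvm,tvm-model,_pytorch,_int8,_batch_size.8,_graph_executor,_model.resnet50
-    ```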
- - - ##### Default variations - - `_fp32,_onnx,_virtual_machine` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_ML_MODEL_MAX_BATCH_SIZE: `1` - * CM_TUNE_TVM_MODEL: `no` - * CM_TVM_USE_VM: `yes` - * CM_TVM_FRONTEND_FRAMEWORK: `onnx` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tvm-model/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get ml-model-tvm tvm-model [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-models/index.md b/docs/scripts/AI-ML-models/index.md deleted file mode 100644 index c3c12890a..000000000 --- a/docs/scripts/AI-ML-models/index.md +++ /dev/null @@ -1,21 +0,0 @@ -* [convert-ml-model-huggingface-to-onnx](convert-ml-model-huggingface-to-onnx/index.md) -* [get-bert-squad-vocab](get-bert-squad-vocab/index.md) -* [get-dlrm](get-dlrm/index.md) -* [get-ml-model-3d-unet-kits19](get-ml-model-3d-unet-kits19/index.md) -* [get-ml-model-bert-base-squad](get-ml-model-bert-base-squad/index.md) -* [get-ml-model-bert-large-squad](get-ml-model-bert-large-squad/index.md) -* [get-ml-model-dlrm-terabyte](get-ml-model-dlrm-terabyte/index.md) -* [get-ml-model-efficientnet-lite](get-ml-model-efficientnet-lite/index.md) -* [get-ml-model-gptj](get-ml-model-gptj/index.md) -* [get-ml-model-huggingface-zoo](get-ml-model-huggingface-zoo/index.md) -* [get-ml-model-llama2](get-ml-model-llama2/index.md) -* [get-ml-model-mobilenet](get-ml-model-mobilenet/index.md) -* [get-ml-model-neuralmagic-zoo](get-ml-model-neuralmagic-zoo/index.md) -* [get-ml-model-resnet50](get-ml-model-resnet50/index.md) -* [get-ml-model-retinanet](get-ml-model-retinanet/index.md) -* [get-ml-model-retinanet-nvidia](get-ml-model-retinanet-nvidia/index.md) -* [get-ml-model-rnnt](get-ml-model-rnnt/index.md) -* [get-ml-model-stable-diffusion](get-ml-model-stable-diffusion/index.md) -* [get-ml-model-tiny-resnet](get-ml-model-tiny-resnet/index.md) -* [get-ml-model-using-imagenet-from-model-zoo](get-ml-model-using-imagenet-from-model-zoo/index.md) -* [get-tvm-model](get-tvm-model/index.md) diff --git a/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md b/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md deleted file mode 100644 index 9c61d1124..000000000 --- a/docs/scripts/AI-ML-optimization/calibrate-model-for.qaic/index.md +++ /dev/null @@ -1,186 +0,0 @@ -# calibrate-model-for.qaic -Automatically generated README for this automation recipe: **calibrate-model-for.qaic** - -Category: **[AI/ML optimization](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/calibrate-model-for.qaic/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "qaic calibrate profile qaic-profile qaic-calibrate" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'qaic,calibrate,profile,qaic-profile,qaic-calibrate' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "qaic calibrate profile qaic-profile qaic-calibrate[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_first.#` - -
- - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_bs.#` - - ENV variables: - - CM_QAIC_MODEL_BATCH_SIZE: `#` - - CM_CREATE_INPUT_BATCH: `yes` - * `_bs.1` - - ENV variables: - - CM_QAIC_MODEL_BATCH_SIZE: `1` - - CM_CREATE_INPUT_BATCH: `yes` - -
- - - * Group "**calib-dataset-filter-size**" -
- Click here to expand this section. - - * `_filter-size.#` - -
- - - * Group "**calibration-option**" -
- Click here to expand this section. - - * `_mlperf.option1` - * `_mlperf.option2` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_bert-99` - - ENV variables: - - CM_CALIBRATE_SQUAD: `yes` - - CM_QAIC_COMPILER_ARGS: `` - - CM_QAIC_COMPILER_PARAMS: `-onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,<<>> -input-list-file=<<>> -num-histogram-bins=512 -profiling-threads=<<>>` - - CM_QAIC_MODEL_TO_CONVERT: `calibrate_bert_mlperf` - * `_resnet50` - - ENV variables: - - CM_QAIC_MODEL_NAME: `resnet50` - - CM_CALIBRATE_IMAGENET: `yes` - - CM_QAIC_COMPILER_ARGS: `` - - CM_QAIC_COMPILER_PARAMS: `-output-node-name=ArgMax -profiling-threads=<<>>` - - CM_QAIC_OUTPUT_NODE_NAME: `-output-node-name=ArgMax` - - CM_QAIC_MODEL_TO_CONVERT: `calibrate_resnet50_tf` - * `_retinanet` - - ENV variables: - - CM_QAIC_MODEL_NAME: `retinanet` - - CM_CALIBRATE_OPENIMAGES: `yes` - - CM_QAIC_COMPILER_ARGS: `` - - CM_QAIC_COMPILER_PARAMS: `-enable-channelwise -profiling-threads=<<>> -onnx-define-symbol=batch_size,<<>> -node-precision-info=<<>>` - - CM_QAIC_MODEL_TO_CONVERT: `calibrate_retinanet_no_nms_mlperf` - -
- - - * Group "**model-framework**" -
- Click here to expand this section. - - * `_tf` - -
- - - * Group "**seq-length**" -
- Click here to expand this section.
-
-    * `_seq.#`
-        - ENV variables:
-            - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: `#`
-    * `_seq.384`
-        - ENV variables:
-            - CM_DATASET_SQUAD_TOKENIZED_MAX_SEQ_LENGTH: `384`
-
-    
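-    For example, one variation from each of several groups above can be combined in a single calibration call (a sketch using the documented variation names):
-
-    ```bash
-    cm run script --tags=qaic,calibrate,profile,qaic-profile,qaic-calibrate,_resnet50,_bs.1,_mlperf.option1
-    ```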
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/calibrate-model-for.qaic/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "qaic calibrate profile qaic-profile qaic-calibrate [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md b/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md deleted file mode 100644 index 4bc9d3db7..000000000 --- a/docs/scripts/AI-ML-optimization/compile-model-for.qaic/index.md +++ /dev/null @@ -1,216 +0,0 @@ -# compile-model-for.qaic -Automatically generated README for this automation recipe: **compile-model-for.qaic** - -Category: **[AI/ML optimization](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-model-for.qaic/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "qaic compile model model-compile qaic-compile" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=qaic,compile,model,model-compile,qaic-compile[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'qaic,compile,model,model-compile,qaic-compile' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "qaic compile model model-compile qaic-compile[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_bert-99` - - ENV variables: - - CM_COMPILE_BERT: `on` - - CM_QAIC_MODEL_TO_CONVERT: `calibrate_bert_mlperf` - - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -execute-nodes-in-fp16=Add,Div,Erf,Softmax -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -quantization-precision-bias=Int32 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -multicast-weights -combine-inputs=false -combine-outputs=false` - - CM_QAIC_MODEL_COMPILER_ARGS: `` - * `_bert-99.9` - - ENV variables: - - CM_COMPILE_BERT: `on` - - CM_QAIC_MODEL_TO_CONVERT: `bert_mlperf` - - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -convert-to-fp16 -vvv -compile-only -onnx-define-symbol=batch_size,1 -onnx-define-symbol=seg_length,384 -combine-inputs=false -combine-outputs=false` - - CM_QAIC_MODEL_COMPILER_ARGS: `` - * `_resnet50` - - ENV variables: - - CM_COMPILE_RESNET: `on` - - CM_QAIC_MODEL_TO_CONVERT: `compile_resnet50_tf` - - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -quantization-schema=symmetric_with_uint8 -quantization-precision=Int8 -output-node-name=ArgMax -vvv -compile-only -use-producer-dma=1` - * `_retinanet` - - ENV variables: - - CM_COMPILE_RETINANET: `on` - - CM_QAIC_MODEL_TO_CONVERT: `calibrate_retinanet_no_nms_mlperf` - - CM_QAIC_MODEL_COMPILER_ARGS: `-aic-enable-depth-first` - - CM_QAIC_MODEL_COMPILER_PARAMS_BASE: `-aic-hw -aic-hw-version=2.0 -compile-only -enable-channelwise -onnx-define-symbol=batch_size,1 -node-precision-info=<<>> -quantization-schema-constants=symmetric_with_uint8 -quantization-schema-activations=asymmetric -quantization-calibration=None` - -
- - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_bs.#` - - ENV variables: - - CM_QAIC_MODEL_BATCH_SIZE: `#` - * `_bs.1` - - ENV variables: - - CM_QAIC_MODEL_BATCH_SIZE: `1` - -
- - - * Group "**calib-dataset-filter-size**" -
- Click here to expand this section. - - * `_filter-size.#` - -
- - - * Group "**mlperf-scenario**" -
- Click here to expand this section. - - * `_multistream` - * `_offline` - * `_server` - * **`_singlestream`** (default) - -
- - - * Group "**model-framework**" -
- Click here to expand this section. - - * `_tf` - -
- - - * Group "**nsp**" -
- Click here to expand this section. - - * `_nsp.14` - * `_nsp.16` - * `_nsp.8` - * `_nsp.9` - -
- - - * Group "**percentile-calibration**" -
- Click here to expand this section. - - * `_pc.#` - - ENV variables: - - CM_QAIC_MODEL_COMPILER_PERCENTILE_CALIBRATION_VALUE: `#` - - CM_QAIC_MODEL_COMPILER_QUANTIZATION_PARAMS: `-quantization-calibration=Percentile -percentile-calibration-value=<<>>` - -
- - - * Group "**quantization**" -
- Click here to expand this section. - - * `_no-quantized` - - ENV variables: - - CM_QAIC_MODEL_QUANTIZATION: `no` - * **`_quantized`** (default) - - ENV variables: - - CM_QAIC_MODEL_QUANTIZATION: `yes` - -
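-    For example, a compile call combining model, MLPerf-scenario and NSP variations from the groups above (a sketch using the documented variation names):
-
-    ```bash
-    cm run script --tags=qaic,compile,model,model-compile,qaic-compile,_resnet50,_offline,_nsp.14
-    ```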
- - - ##### Default variations - - `_quantized,_singlestream` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--register=value` → `CM_REGISTER_CACHE=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-model-for.qaic/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "qaic compile model model-compile qaic-compile [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/AI-ML-optimization/index.md b/docs/scripts/AI-ML-optimization/index.md deleted file mode 100644 index c1a250851..000000000 --- a/docs/scripts/AI-ML-optimization/index.md +++ /dev/null @@ -1,3 +0,0 @@ -* [calibrate-model-for.qaic](calibrate-model-for.qaic/index.md) -* [compile-model-for.qaic](compile-model-for.qaic/index.md) -* [prune-bert-models](prune-bert-models/index.md) diff --git a/docs/scripts/AI-ML-optimization/prune-bert-models/index.md b/docs/scripts/AI-ML-optimization/prune-bert-models/index.md deleted file mode 100644 index bf9821a7e..000000000 --- a/docs/scripts/AI-ML-optimization/prune-bert-models/index.md +++ /dev/null @@ -1,132 +0,0 @@ -# prune-bert-models -Automatically generated README for this automation recipe: **prune-bert-models** - -Category: **[AI/ML optimization](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "prune bert-models bert-prune prune-bert-models" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=prune,bert-models,bert-prune,prune-bert-models[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'prune,bert-models,bert-prune,prune-bert-models' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "prune bert-models bert-prune prune-bert-models[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_model.#` - - ENV variables: - - CM_BERT_PRUNE_MODEL_NAME: `#` - - CM_MODEL_ZOO_STUB: `#` - * `_path.#` - - ENV variables: - - CM_BERT_PRUNE_CKPT_PATH: `#` - * `_task.#` - - ENV variables: - - CM_BERT_PRUNE_TASK: `#` - -
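-    A hypothetical invocation combining the `_model.#` variation with the input flags documented below (the model stub matches the documented default; the output path is a placeholder):
-
-    ```bash
-    cm run script --tags=prune,bert-models,bert-prune,prune-bert-models,_model.bert-large-uncased --constraint=0.5 --output_dir=/tmp/pruned-bert
-    ```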
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--constraint=value` → `CM_BERT_PRUNE_CONSTRAINT=value` - * `--output_dir=value` → `CM_BERT_PRUNE_OUTPUT_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BERT_PRUNE_TASK: `squad` - * CM_BERT_PRUNE_MODEL_NAME: `bert-large-uncased` - * CM_MODEL_ZOO_STUB: `bert-large-uncased` - * CM_BERT_PRUNE_CONSTRAINT: `0.5` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-bert-models/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "prune bert-models bert-prune prune-bert-models [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/CM-Interface/get-cache-dir/index.md b/docs/scripts/CM-Interface/get-cache-dir/index.md deleted file mode 100644 index 6c62118e8..000000000 --- a/docs/scripts/CM-Interface/get-cache-dir/index.md +++ /dev/null @@ -1,95 +0,0 @@ -# get-cache-dir -Automatically generated README for this automation recipe: **get-cache-dir** - -Category: **[CM Interface](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cache-dir/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cache dir directory" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cache,dir,directory[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cache dir directory [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cache,dir,directory' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cache dir directory[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_name.#` - - ENV variables: - - CM_CACHE_DIR_NAME: `#` - -
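-    The Python tab above can be filled in with a concrete, runnable call (a minimal sketch, assuming the `cmind` package is installed and the repository is pulled; the cache name `my-cache` is a placeholder passed via the `_name.#` variation):
-
-    ```python
-    import cmind
-
-    # Resolve (or create) a named cache directory; output goes to the console ('con').
-    r = cmind.access({'action': 'run',
-                      'automation': 'script',
-                      'tags': 'get,cache,dir,directory,_name.my-cache',
-                      'out': 'con'})
-
-    if r['return'] > 0:
-        print(r['error'])
-    ```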
- - -___ -#### Script output -```bash -cmr "get cache dir directory [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/CM-Interface/index.md b/docs/scripts/CM-Interface/index.md deleted file mode 100644 index 32d34042d..000000000 --- a/docs/scripts/CM-Interface/index.md +++ /dev/null @@ -1 +0,0 @@ -* [get-cache-dir](get-cache-dir/index.md) diff --git a/docs/scripts/CM-automation/create-custom-cache-entry/index.md b/docs/scripts/CM-automation/create-custom-cache-entry/index.md deleted file mode 100644 index 178195e07..000000000 --- a/docs/scripts/CM-automation/create-custom-cache-entry/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# create-custom-cache-entry -Automatically generated README for this automation recipe: **create-custom-cache-entry** - -Category: **[CM automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/create-custom-cache-entry/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "create custom cache entry" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=create,custom,cache,entry [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "create custom cache entry " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'create,custom,cache,entry' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "create custom cache entry" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--env_key=value` → `CM_CUSTOM_CACHE_ENTRY_ENV_KEY=value` - * `--env_key2=value` → `CM_CUSTOM_CACHE_ENTRY_ENV_KEY2=value` - * `--path=value` → `CM_CUSTOM_CACHE_ENTRY_PATH=value` - * `--to=value` → `CM_CUSTOM_CACHE_ENTRY_PATH=value` - - - - -___ -#### Script output -```bash -cmr "create custom cache entry " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/CM-automation/index.md b/docs/scripts/CM-automation/index.md deleted file mode 100644 index 996533c19..000000000 --- a/docs/scripts/CM-automation/index.md +++ /dev/null @@ -1 +0,0 @@ -* [create-custom-cache-entry](create-custom-cache-entry/index.md) diff --git a/docs/scripts/CM-interface-prototyping/index.md b/docs/scripts/CM-interface-prototyping/index.md deleted file mode 100644 index 927cf1b1a..000000000 --- a/docs/scripts/CM-interface-prototyping/index.md +++ /dev/null @@ -1,2 +0,0 @@ -* [test-debug](test-debug/index.md) -* [test-mlperf-inference-retinanet](test-mlperf-inference-retinanet/index.md) diff --git a/docs/scripts/CM-interface-prototyping/test-debug/index.md b/docs/scripts/CM-interface-prototyping/test-debug/index.md deleted file mode 100644 index 0e848be1a..000000000 --- a/docs/scripts/CM-interface-prototyping/test-debug/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# test-debug -Automatically generated README for this automation recipe: **test-debug** - -Category: **[CM interface prototyping](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test cm-debug" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,cm-debug - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test cm-debug " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,cm-debug' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test cm-debug" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-debug/run.bat) -___ -#### Script output -```bash -cmr "test cm-debug " -j -``` \ No newline at end of file diff --git a/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md b/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md deleted file mode 100644 index 406c3a9ad..000000000 --- a/docs/scripts/CM-interface-prototyping/test-mlperf-inference-retinanet/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# test-mlperf-inference-retinanet -Automatically generated README for this automation recipe: **test-mlperf-inference-retinanet** - -Category: **[CM interface prototyping](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test mlperf-inference-win retinanet windows" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,mlperf-inference-win,retinanet,windows - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test mlperf-inference-win retinanet windows " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,mlperf-inference-win,retinanet,windows' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test mlperf-inference-win retinanet windows" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-mlperf-inference-retinanet/run.bat) -___ -#### Script output -```bash -cmr "test mlperf-inference-win retinanet windows " -j -``` \ No newline at end of file diff --git a/docs/scripts/CUDA-automation/get-cuda-devices/index.md b/docs/scripts/CUDA-automation/get-cuda-devices/index.md deleted file mode 100644 index 7fddb5a55..000000000 --- a/docs/scripts/CUDA-automation/get-cuda-devices/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-cuda-devices -Automatically generated README for this automation recipe: **get-cuda-devices** - -Category: **[CUDA automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cuda-devices" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cuda-devices - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cuda-devices " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cuda-devices' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cuda-devices" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda-devices/run.bat) -___ -#### Script output -```bash -cmr "get cuda-devices " -j -``` \ No newline at end of file diff --git a/docs/scripts/CUDA-automation/get-cuda/index.md b/docs/scripts/CUDA-automation/get-cuda/index.md deleted file mode 100644 index 5e789f3e6..000000000 --- a/docs/scripts/CUDA-automation/get-cuda/index.md +++ /dev/null @@ -1,158 +0,0 @@ -# get-cuda -Automatically generated README for this automation recipe: **get-cuda** - -Category: **[CUDA automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/README-extra.md) - - ---- - -# System dependencies - -* Download [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit). -* Download [cuDNN](https://developer.nvidia.com/rdp/cudnn-download). -* Download [TensorRT](https://developer.nvidia.com/nvidia-tensorrt-8x-download). - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/_cm.yaml)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_cudnn` - - ENV variables: - - CM_CUDA_NEEDS_CUDNN: `yes` - * `_package-manager` - - ENV variables: - - CM_CUDA_PACKAGE_MANAGER_INSTALL: `yes` - -
- - - * Group "**installation-mode**" -
- Click here to expand this section. - - * `_lib-only` - - ENV variables: - - CM_CUDA_FULL_TOOLKIT_INSTALL: `no` - - CM_TMP_FILE_TO_CHECK_UNIX: `libcudart.so` - - CM_TMP_FILE_TO_CHECK_WINDOWS: `libcudart.dll` - * **`_toolkit`** (default) - - ENV variables: - - CM_CUDA_FULL_TOOLKIT_INSTALL: `yes` - - CM_TMP_FILE_TO_CHECK_UNIX: `nvcc` - - CM_TMP_FILE_TO_CHECK_WINDOWS: `nvcc.exe` - -
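-    For example, requesting the full toolkit together with cuDNN from a locally downloaded archive (a sketch; the tar-file path is a placeholder and maps to `CM_CUDNN_TAR_FILE_PATH` as shown below):
-
-    ```bash
-    cm run script --tags=get,cuda,cuda-compiler,cuda-lib,toolkit,lib,nvcc,get-nvcc,get-cuda,46d133d9ef92422d,_toolkit,_cudnn --cudnn_tar_file=/tmp/cudnn-linux-x86_64.tar.xz
-    ```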
- - - ##### Default variations - - `_toolkit` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--cudnn_tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value` - * `--cudnn_tar_path=value` → `CM_CUDNN_TAR_FILE_PATH=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_CUDA_PATH_LIB_CUDNN_EXISTS: `no` - * CM_REQUIRE_INSTALL: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cuda/run.bat) -___ -#### Script output -```bash -cmr "get cuda cuda-compiler cuda-lib toolkit lib nvcc get-nvcc get-cuda 46d133d9ef92422d [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/CUDA-automation/get-cudnn/index.md b/docs/scripts/CUDA-automation/get-cudnn/index.md deleted file mode 100644 index 76655cd84..000000000 --- a/docs/scripts/CUDA-automation/get-cudnn/index.md +++ /dev/null @@ -1,115 +0,0 @@ -# get-cudnn -Automatically generated README for this automation recipe: **get-cudnn** - -Category: **[CUDA automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cudnn nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cudnn,nvidia [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cudnn nvidia " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cudnn,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cudnn nvidia" [--input_flags] - ``` -___ - -=== "Input Flags" - - - #### Input Flags - - * --**input:** Full path to the installed cuDNN library - * --**tar_file:** Full path to the cuDNN Tar file downloaded from Nvidia website (https://developer.nvidia.com/cudnn) -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `CM_INPUT=value` - * `--tar_file=value` → `CM_CUDNN_TAR_FILE_PATH=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_INPUT: `` - * CM_SUDO: `sudo` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cudnn/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get cudnn nvidia " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/CUDA-automation/get-tensorrt/index.md b/docs/scripts/CUDA-automation/get-tensorrt/index.md deleted file mode 100644 index afa872119..000000000 --- a/docs/scripts/CUDA-automation/get-tensorrt/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# get-tensorrt -Automatically generated README for this automation recipe: **get-tensorrt** - -Category: **[CUDA automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get tensorrt nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,tensorrt,nvidia[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get tensorrt nvidia [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,tensorrt,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get tensorrt nvidia[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_dev` - - ENV variables: - - CM_TENSORRT_REQUIRE_DEV: `yes` - -
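-    For example, requesting the development files from a locally downloaded archive (a sketch; the path is a placeholder for the `--tar_file` flag documented below):
-
-    ```bash
-    cm run script --tags=get,tensorrt,nvidia,_dev --tar_file=/tmp/TensorRT-8.x.tar.gz
-    ```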
- -=== "Input Flags" - - - #### Input Flags - - * --**input:** Full path to the installed TensorRT library (nvinfer) - * --**tar_file:** Full path to the TensorRT Tar file downloaded from the Nvidia website (https://developer.nvidia.com/tensorrt) -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `CM_INPUT=value` - * `--tar_file=value` → `CM_TENSORRT_TAR_FILE_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-tensorrt/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get tensorrt nvidia [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/CUDA-automation/index.md b/docs/scripts/CUDA-automation/index.md deleted file mode 100644 index 335dc83a5..000000000 --- a/docs/scripts/CUDA-automation/index.md +++ /dev/null @@ -1,6 +0,0 @@ -* [get-cuda](get-cuda/index.md) -* [get-cuda-devices](get-cuda-devices/index.md) -* [get-cudnn](get-cudnn/index.md) -* [get-tensorrt](get-tensorrt/index.md) -* [install-cuda-package-manager](install-cuda-package-manager/index.md) -* [install-cuda-prebuilt](install-cuda-prebuilt/index.md) diff --git a/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md b/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md deleted file mode 100644 index 84b7b3e48..000000000 --- a/docs/scripts/CUDA-automation/install-cuda-package-manager/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# install-cuda-package-manager -Automatically generated README for this automation recipe: **install-cuda-package-manager** - -Category: **[CUDA automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install package-manager cuda package-manager-cuda install-pm-cuda" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,package-manager,cuda,package-manager-cuda,install-pm-cuda - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install package-manager cuda package-manager-cuda install-pm-cuda " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,package-manager,cuda,package-manager-cuda,install-pm-cuda' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install package-manager cuda package-manager-cuda install-pm-cuda" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/run-ubuntu.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-package-manager/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install package-manager cuda package-manager-cuda install-pm-cuda " -j -``` \ No newline at end of file diff --git a/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md b/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md deleted file mode 100644 index 674817343..000000000 --- a/docs/scripts/CUDA-automation/install-cuda-prebuilt/index.md +++ /dev/null @@ -1,138 +0,0 @@ -# install-cuda-prebuilt -Automatically generated README for this automation recipe: **install-cuda-prebuilt** - -Category: **[CUDA automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**install-driver**" -
- Click here to expand this section. - - * `_driver` - - ENV variables: - - CM_CUDA_INSTALL_DRIVER: `yes` - * **`_no-driver`** (default) - - ENV variables: - - CM_CUDA_INSTALL_DRIVER: `no` - -
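-    For example, installing one of the listed versions together with the NVIDIA driver (a sketch; `--version` is assumed here to select among the versions listed below):
-
-    ```bash
-    cm run script --tags=install,prebuilt,cuda,prebuilt-cuda,install-prebuilt-cuda,_driver --version=12.4.1
-    ```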
- - - ##### Default variations - - `_no-driver` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--local_run_file_path=value` → `CUDA_RUN_FILE_LOCAL_PATH=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_SUDO: `sudo` - - -#### Versions -Default version: `11.8.0` - -* `11.7.0` -* `11.8.0` -* `12.0.0` -* `12.1.1` -* `12.2.0` -* `12.3.2` -* `12.4.1` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cuda-prebuilt/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install prebuilt cuda prebuilt-cuda install-prebuilt-cuda [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Cloud-automation/destroy-terraform/index.md b/docs/scripts/Cloud-automation/destroy-terraform/index.md deleted file mode 100644 index 0cdd8886a..000000000 --- a/docs/scripts/Cloud-automation/destroy-terraform/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# destroy-terraform -Automatically generated README for this automation recipe: **destroy-terraform** - -Category: **[Cloud automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "destroy terraform cmd" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=destroy,terraform,cmd - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "destroy terraform cmd " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'destroy,terraform,cmd' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "destroy terraform cmd" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/destroy-terraform/run.bat) -___ -#### Script output -```bash -cmr "destroy terraform cmd " -j -``` \ No newline at end of file diff --git a/docs/scripts/Cloud-automation/get-aws-cli/index.md b/docs/scripts/Cloud-automation/get-aws-cli/index.md deleted file mode 100644 index 9e06d804b..000000000 --- a/docs/scripts/Cloud-automation/get-aws-cli/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# get-aws-cli -Automatically generated README for this automation recipe: **get-aws-cli** - -Category: **[Cloud automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get aws-cli aws cli" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,aws-cli,aws,cli - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get aws-cli aws cli " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,aws-cli,aws,cli' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get aws-cli aws cli" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aws-cli/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get aws-cli aws cli " -j -``` \ No newline at end of file diff --git a/docs/scripts/Cloud-automation/get-terraform/index.md b/docs/scripts/Cloud-automation/get-terraform/index.md deleted file mode 100644 index 18c91c264..000000000 --- a/docs/scripts/Cloud-automation/get-terraform/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# get-terraform -Automatically generated README for this automation recipe: **get-terraform** - -Category: **[Cloud automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get terraform get-terraform" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,terraform,get-terraform - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get terraform get-terraform " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,terraform,get-terraform' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get terraform get-terraform" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-terraform/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get terraform get-terraform " -j -``` \ No newline at end of file diff --git a/docs/scripts/Cloud-automation/index.md b/docs/scripts/Cloud-automation/index.md deleted file mode 100644 index 84fc1dc1a..000000000 --- a/docs/scripts/Cloud-automation/index.md +++ /dev/null @@ -1,6 +0,0 @@ -* [destroy-terraform](destroy-terraform/index.md) -* [get-aws-cli](get-aws-cli/index.md) -* [get-terraform](get-terraform/index.md) -* [install-aws-cli](install-aws-cli/index.md) -* [install-terraform-from-src](install-terraform-from-src/index.md) -* [run-terraform](run-terraform/index.md) diff --git a/docs/scripts/Cloud-automation/install-aws-cli/index.md b/docs/scripts/Cloud-automation/install-aws-cli/index.md deleted file mode 100644 index 5973d9c9c..000000000 --- a/docs/scripts/Cloud-automation/install-aws-cli/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# install-aws-cli -Automatically generated README for this automation recipe: **install-aws-cli** - -Category: **[Cloud automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-aws-cli/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install script aws-cli aws cli" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,script,aws-cli,aws,cli - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install script aws-cli aws cli " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,script,aws-cli,aws,cli' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install script aws-cli aws cli" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-aws-cli/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install script aws-cli aws cli " -j -``` \ No newline at end of file diff --git a/docs/scripts/Cloud-automation/install-terraform-from-src/index.md b/docs/scripts/Cloud-automation/install-terraform-from-src/index.md deleted file mode 100644 index d1cba41e1..000000000 --- a/docs/scripts/Cloud-automation/install-terraform-from-src/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# install-terraform-from-src -Automatically generated README for this automation recipe: **install-terraform-from-src** - -Category: **[Cloud automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-terraform-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install terraform from-src" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,terraform,from-src - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install terraform from-src " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,terraform,from-src' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install terraform from-src" - ``` -___ - -#### Versions -Default version: `main` - -* `main` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-terraform-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install terraform from-src " -j -``` \ No newline at end of file diff --git a/docs/scripts/Cloud-automation/run-terraform/index.md b/docs/scripts/Cloud-automation/run-terraform/index.md deleted file mode 100644 index f164a7352..000000000 --- a/docs/scripts/Cloud-automation/run-terraform/index.md +++ /dev/null @@ -1,388 +0,0 @@ -# run-terraform -Automatically generated README for this automation recipe: **run-terraform** - -Category: **[Cloud automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/README-extra.md) - - ---- - -## Setup for Google Cloud Instances -``` -sudo snap install google-cloud-cli --classic -gcloud auth application-default login -``` - -The above two commands will install google-cloud-cli and authorizes the user to access it. Once done, you can start creating gcp instance using CM commands like below. 
To destroy an instance just repeat the same command with `--destroy` option. - -``` -cm run script --tags=run,terraform,_gcp,_gcp_project.mlperf-inference-tests --cminit -``` -Here, `mlperf-inference-tests` is the name of the google project as created in [Google cloud console](https://console.cloud.google.com/apis/dashboard) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run terraform" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,terraform[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run terraform [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,terraform' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run terraform[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_amazon-linux-2-kernel.#` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE_OS: `amazon-linux-2-kernel.#` - * `_graviton` - - ENV variables: - - CM_TERRAFORM_AWS_GRAVITON_INSTANCE: `yes` - * `_inferentia` - - ENV variables: - - CM_TERRAFORM_AWS_INFERENTIA_INSTANCE: `yes` - * `_rhel.#` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE_OS: `rhel.#` - * `_ubuntu.#` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE_OS: `ubuntu.#` - -
- - - * Group "**aws-instance-image**" -
- Click here to expand this section. - - * `_amazon-linux-2-kernel.510,arm64,us-west-2` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ami-0f1a5f5ada0e7da53` - * `_aws_instance_image.#` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `#` - * `_aws_instance_image.ami-0735c191cf914754d` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ami-0735c191cf914754d` - * `_aws_instance_image.ami-0a0d8589b597d65b3` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ami-0a0d8589b597d65b3` - * `_rhel.9,x86,us-west-2` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ami-0dda7e535b65b6469` - * `_ubuntu.2204,arm64,us-west-2` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ami-079f51a7bcca65b92` - * `_ubuntu.2204,x86,us-west-2` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ami-0735c191cf914754d` - -
- - - * Group "**aws-instance-type**" -
- Click here to expand this section. - - * `_a1.2xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `a1.2xlarge` - * `_a1.metal` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `a1.metal` - * `_a1.xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `a1.xlarge` - * `_aws_instance_type.#` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `#` - * `_c5.12xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `c5.12xlarge` - * `_c5.4xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `c5.4xlarge` - * `_c5d.9xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `c5d.9xlarge` - * `_g4dn.xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `g4dn.xlarge` - * `_inf1.2xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `inf1.2xlarge` - * `_inf1.xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `inf1.xlarge` - * `_inf2.8xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `inf2.8xlarge` - * `_inf2.xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `inf2.xlarge` - * `_m7g.2xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `m7g.2xlarge` - * `_m7g.xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `m7g.xlarge` - * `_t2.#` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.#` - * `_t2.2xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.2xlarge` - * `_t2.large` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.large` - * `_t2.medium` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.medium` - * `_t2.micro` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.micro` - * `_t2.nano` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.nano` - * `_t2.small` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.small` - * `_t2.xlarge` - - ENV variables: - - TF_VAR_INSTANCE_TYPE: `t2.xlarge` - -
- - - * Group "**cloud-provider**" -
- Click here to expand this section. - - * **`_aws`** (default) - - ENV variables: - - CM_TERRAFORM_CONFIG_DIR_NAME: `aws` - * `_gcp` - - ENV variables: - - CM_TERRAFORM_CONFIG_DIR_NAME: `gcp` - -
- - - * Group "**gcp-instance-image**" -
- Click here to expand this section. - - * `_debian-cloud/debian-11` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `debian-cloud/debian-11` - * `_gcp_instance_image.#` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `#` - * `_ubuntu-2204-jammy-v20230114` - - ENV variables: - - TF_VAR_INSTANCE_IMAGE: `ubuntu-2204-jammy-v20230114` - -
- - - * Group "**gcp-instance-type**" -
- Click here to expand this section.
-
-  * `_f1-micro`
-    - ENV variables:
-      - TF_VAR_INSTANCE_TYPE: `f1-micro`
-  * `_gcp_instance_type.#`
-    - ENV variables:
-      - TF_VAR_INSTANCE_TYPE: `#`
-  * `_n1-highmem.#`
-    - ENV variables:
-      - TF_VAR_INSTANCE_TYPE: `n1-highmem-#`
-  * `_n1-standard.#`
-    - ENV variables:
-      - TF_VAR_INSTANCE_TYPE: `n1-standard-#`
-
-
- - - * Group "**gcp-project**" -
- Click here to expand this section. - - * `_gcp_project.#` - - ENV variables: - - TF_VAR_GCP_PROJECT: `#` - -
- - - * Group "**instance-name**" -
- Click here to expand this section. - - * `_instance_name.#` - - ENV variables: - - TF_VAR_INSTANCE_NAME: `#` - -
- - - * Group "**platform**" -
- Click here to expand this section. - - * `_arm64` - - ENV variables: - - CM_INSTANCE_PLATFORM: `arm64` - * **`_x86`** (default) - - ENV variables: - - CM_INSTANCE_PLATFORM: `x86` - -
- - - * Group "**region**" -
- Click here to expand this section. - - * `_region.#` - - ENV variables: - - TF_VAR_INSTANCE_REGION: `#` - * `_us-west-2` - - ENV variables: - - TF_VAR_INSTANCE_REGION: `us-west-2` - -
- - - * Group "**storage-size**" -
- Click here to expand this section. - - * `_storage_size.#` - - ENV variables: - - TF_VAR_DISK_GBS: `#` - * `_storage_size.8` - - ENV variables: - - TF_VAR_DISK_GBS: `8` - -
- - - * Group "**zone**" -
- Click here to expand this section. - - * `_zone.#` - - ENV variables: - - TF_VAR_INSTANCE_ZONE: `#` - -
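-
-  For example, the groups above compose as comma-separated suffixes in a single run. A minimal sketch, assuming AWS credentials are already configured (the instance name is illustrative, not a default):
-
-  ```bash
-  # provision a t2.medium instance in us-west-2 on AWS (the default cloud provider)
-  cm run script --tags=run,terraform,_aws,_t2.medium,_us-west-2,_instance_name.cm-test --cminit
-
-  # tear the same instance down by repeating the command with --destroy
-  cm run script --tags=run,terraform,_aws,_t2.medium,_us-west-2,_instance_name.cm-test --destroy=yes
-  ```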
- - - ##### Default variations - - `_aws,_x86` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--cminit=value` → `CM_TERRAFORM_CM_INIT=value` - * `--destroy=value` → `CM_DESTROY_TERRAFORM=value` - * `--gcp_credentials_json_file=value` → `CM_GCP_CREDENTIALS_JSON_PATH=value` - * `--key_file=value` → `CM_SSH_KEY_FILE=value` - * `--run_cmds=value` → `CM_TERRAFORM_RUN_COMMANDS=value` - * `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * TF_VAR_SECURITY_GROUP_ID: `sg-0783752c97d2e011d` - * TF_VAR_CPU_COUNT: `1` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-terraform/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run terraform [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Collective-benchmarking/index.md b/docs/scripts/Collective-benchmarking/index.md deleted file mode 100644 index 71dc75a6b..000000000 --- a/docs/scripts/Collective-benchmarking/index.md +++ /dev/null @@ -1 +0,0 @@ -* [launch-benchmark](launch-benchmark/index.md) diff --git a/docs/scripts/Collective-benchmarking/launch-benchmark/index.md b/docs/scripts/Collective-benchmarking/launch-benchmark/index.md deleted file mode 100644 index 4ad86bd2a..000000000 --- a/docs/scripts/Collective-benchmarking/launch-benchmark/index.md +++ /dev/null @@ -1,81 +0,0 @@ -# launch-benchmark -Automatically generated README for this automation recipe: **launch-benchmark** - -Category: **[Collective benchmarking](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/launch-benchmark/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/launch-benchmark/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "launch benchmark" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=launch,benchmark - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "launch benchmark " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'launch,benchmark' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "launch benchmark" - ``` -___ - - -___ -#### Script output -```bash -cmr "launch benchmark " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-aocl/index.md b/docs/scripts/Compiler-automation/get-aocl/index.md deleted file mode 100644 index 7ff7292fb..000000000 --- a/docs/scripts/Compiler-automation/get-aocl/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# get-aocl -Automatically generated README for this automation recipe: **get-aocl** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/_cm.json)* -* Output cached? *true* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get lib aocl amd-optimized amd" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,lib,aocl,amd-optimized,amd - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get lib aocl amd-optimized amd " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,lib,aocl,amd-optimized,amd' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get lib aocl amd-optimized amd" - ``` -___ - -#### Versions -Default version: `4.0` - -* `4.0` -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aocl/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get lib aocl amd-optimized amd " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-cl/index.md b/docs/scripts/Compiler-automation/get-cl/index.md deleted file mode 100644 index fd2dc6cef..000000000 --- a/docs/scripts/Compiler-automation/get-cl/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# Detect or install Microsoft C compiler -Automatically generated README for this automation recipe: **get-cl** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cl compiler c-compiler cpp-compiler get-cl" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cl,compiler,c-compiler,cpp-compiler,get-cl - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cl compiler c-compiler cpp-compiler get-cl " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cl,compiler,c-compiler,cpp-compiler,get-cl' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cl compiler c-compiler cpp-compiler get-cl" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - No run file exists for Linux/macOS -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cl/run.bat) -___ -#### Script output -```bash -cmr "get cl compiler c-compiler cpp-compiler get-cl " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-compiler-flags/index.md b/docs/scripts/Compiler-automation/get-compiler-flags/index.md deleted file mode 100644 index b1b46b23e..000000000 --- a/docs/scripts/Compiler-automation/get-compiler-flags/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-compiler-flags -Automatically generated README for this automation recipe: **get-compiler-flags** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-flags/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get compiler-flags" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,compiler-flags - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get compiler-flags " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,compiler-flags' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get compiler-flags" - ``` -___ - - -___ -#### Script output -```bash -cmr "get compiler-flags " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-compiler-rust/index.md b/docs/scripts/Compiler-automation/get-compiler-rust/index.md deleted file mode 100644 index 90844ce50..000000000 --- a/docs/scripts/Compiler-automation/get-compiler-rust/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-compiler-rust -Automatically generated README for this automation recipe: **get-compiler-rust** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-rust/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get rust-compiler" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,rust-compiler - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get rust-compiler " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,rust-compiler' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get rust-compiler" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-compiler-rust/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get rust-compiler " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-gcc/index.md b/docs/scripts/Compiler-automation/get-gcc/index.md deleted file mode 100644 index 06913a2fc..000000000 --- a/docs/scripts/Compiler-automation/get-gcc/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# Detect or install GCC compiler -Automatically generated README for this automation recipe: **get-gcc** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get gcc compiler c-compiler cpp-compiler get-gcc" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,gcc,compiler,c-compiler,cpp-compiler,get-gcc - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get gcc compiler c-compiler cpp-compiler get-gcc " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,gcc,compiler,c-compiler,cpp-compiler,get-gcc' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get gcc compiler c-compiler cpp-compiler get-gcc" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-gcc/run.bat) -___ -#### Script output -```bash -cmr "get gcc compiler c-compiler cpp-compiler get-gcc " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-go/index.md b/docs/scripts/Compiler-automation/get-go/index.md deleted file mode 100644 index 7d691b01a..000000000 --- a/docs/scripts/Compiler-automation/get-go/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# get-go -Automatically generated README for this automation recipe: **get-go** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get tool go get-go" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,tool,go,get-go - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get tool go get-go " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,tool,go,get-go' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get tool go get-go" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-go/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get tool go get-go " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/get-llvm/index.md b/docs/scripts/Compiler-automation/get-llvm/index.md deleted file mode 100644 index 8c5855c88..000000000 --- a/docs/scripts/Compiler-automation/get-llvm/index.md +++ /dev/null @@ -1,101 +0,0 @@ -# Detect or install LLVM compiler -Automatically generated README for this automation recipe: **get-llvm** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get llvm compiler c-compiler cpp-compiler get-llvm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,llvm,compiler,c-compiler,cpp-compiler,get-llvm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get llvm compiler c-compiler cpp-compiler get-llvm[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_from-prebuilt` - * `_from-src` - -
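-
-  As an illustration, either variation can be appended directly to the detection tags:
-
-  ```bash
-  # prefer a prebuilt LLVM release over a source build
-  cm run script --tags=get,llvm,compiler,c-compiler,cpp-compiler,get-llvm,_from-prebuilt
-  ```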
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-llvm/run.bat) -___ -#### Script output -```bash -cmr "get llvm compiler c-compiler cpp-compiler get-llvm [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/index.md b/docs/scripts/Compiler-automation/index.md deleted file mode 100644 index d24e5e703..000000000 --- a/docs/scripts/Compiler-automation/index.md +++ /dev/null @@ -1,18 +0,0 @@ -* [get-aocl](get-aocl/index.md) -* [get-cl](get-cl/index.md) -* [get-compiler-flags](get-compiler-flags/index.md) -* [get-compiler-rust](get-compiler-rust/index.md) -* [get-gcc](get-gcc/index.md) -* [get-go](get-go/index.md) -* [get-llvm](get-llvm/index.md) -* [install-gcc-src](install-gcc-src/index.md) -* [install-ipex-from-src](install-ipex-from-src/index.md) -* [install-llvm-prebuilt](install-llvm-prebuilt/index.md) -* [install-llvm-src](install-llvm-src/index.md) -* [install-onednn-from-src](install-onednn-from-src/index.md) -* [install-onnxruntime-from-src](install-onnxruntime-from-src/index.md) -* [install-pytorch-from-src](install-pytorch-from-src/index.md) -* [install-pytorch-kineto-from-src](install-pytorch-kineto-from-src/index.md) -* [install-torchvision-from-src](install-torchvision-from-src/index.md) -* [install-tpp-pytorch-extension](install-tpp-pytorch-extension/index.md) -* [install-transformers-from-src](install-transformers-from-src/index.md) diff --git a/docs/scripts/Compiler-automation/install-gcc-src/index.md b/docs/scripts/Compiler-automation/install-gcc-src/index.md deleted file mode 100644 index 54724e8a7..000000000 --- a/docs/scripts/Compiler-automation/install-gcc-src/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# install-gcc-src -Automatically generated README for this automation recipe: **install-gcc-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gcc-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src gcc src-gcc" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,gcc,src-gcc - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src gcc src-gcc " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,gcc,src-gcc' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src gcc src-gcc" - ``` -___ - -#### Versions -Default version: `12` - -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gcc-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src gcc src-gcc " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-ipex-from-src/index.md b/docs/scripts/Compiler-automation/install-ipex-from-src/index.md deleted file mode 100644 index 673ca8376..000000000 --- a/docs/scripts/Compiler-automation/install-ipex-from-src/index.md +++ /dev/null @@ -1,128 +0,0 @@ -# Build IPEX from sources -Automatically generated README for this automation recipe: **install-ipex-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-ipex-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src ipex src-ipex" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,ipex,src-ipex[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src ipex src-ipex [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,ipex,src-ipex' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src ipex src-ipex[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_for-intel-mlperf-inference-v3.1-gptj` - - ENV variables: - - CM_CONDA_ENV: `yes` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - * **`_repo.https://github.com/intel/intel-extension-for-pytorch`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/intel/intel-extension-for-pytorch` - -
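-
-  A hypothetical invocation combining the wildcard variations above (the branch name is illustrative, not a default):
-
-  ```bash
-  # build IPEX from a chosen branch of the default intel-extension-for-pytorch repository
-  cm run script --tags=install,get,src,from.src,ipex,src-ipex,_branch.main
-  ```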
- - - ##### Default variations - - `_repo.https://github.com/intel/intel-extension-for-pytorch` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-ipex-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src ipex src-ipex [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md b/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md deleted file mode 100644 index 96038406e..000000000 --- a/docs/scripts/Compiler-automation/install-llvm-prebuilt/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# Install prebuilt LLVM compiler -Automatically generated README for this automation recipe: **install-llvm-prebuilt** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,prebuilt,llvm,prebuilt-llvm,install-prebuilt-llvm' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm" - ``` -___ - -#### Versions -Default version: `15.0.6` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-prebuilt/run.bat) -___ -#### Script output -```bash -cmr "install prebuilt llvm prebuilt-llvm install-prebuilt-llvm " -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-llvm-src/index.md b/docs/scripts/Compiler-automation/install-llvm-src/index.md deleted file mode 100644 index 655046f73..000000000 --- a/docs/scripts/Compiler-automation/install-llvm-src/index.md +++ /dev/null @@ -1,160 +0,0 @@ -# Build LLVM compiler from sources (can take >30 min) -Automatically generated README for this automation recipe: **install-llvm-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src llvm from.src src-llvm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,llvm,from.src,src-llvm[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src llvm from.src src-llvm [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,llvm,from.src,src-llvm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src llvm from.src src-llvm[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_for-intel-mlperf-inference-v3.1-bert` - - ENV variables: - - CM_LLVM_CONDA_ENV: `yes` - * `_for-intel-mlperf-inference-v3.1-gptj` - - ENV variables: - - CM_LLVM_CONDA_ENV: `yes` - - CM_LLVM_16_INTEL_MLPERF_INFERENCE: `yes` - - USE_CUDA: `0` - - CUDA_VISIBLE_DEVICES: `` - * `_full-history` - * `_runtimes.#` - - ENV variables: - - CM_LLVM_ENABLE_RUNTIMES: `#` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**build-type**" -
- Click here to expand this section. - - * `_debug` - - ENV variables: - - CM_LLVM_BUILD_TYPE: `debug` - * **`_release`** (default) - - ENV variables: - - CM_LLVM_BUILD_TYPE: `release` - -
- - - * Group "**clang**" -
- Click here to expand this section. - - * **`_clang`** (default) - - ENV variables: - - CM_LLVM_ENABLE_PROJECTS: `clang` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - -
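-
-  For instance, one variation from each group can be combined; a sketch with an illustrative branch name:
-
-  ```bash
-  # debug build of clang from a specific branch of the LLVM sources
-  cm run script --tags=install,src,llvm,from.src,src-llvm,_clang,_debug,_branch.release/17.x
-  ```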
- - - ##### Default variations - - `_clang,_release` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-llvm-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src llvm from.src src-llvm [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-onednn-from-src/index.md b/docs/scripts/Compiler-automation/install-onednn-from-src/index.md deleted file mode 100644 index 49bb4844b..000000000 --- a/docs/scripts/Compiler-automation/install-onednn-from-src/index.md +++ /dev/null @@ -1,129 +0,0 @@ -# Build oneDNN from sources -Automatically generated README for this automation recipe: **install-onednn-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onednn-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src onednn src-onednn" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,onednn,src-onednn[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src onednn src-onednn [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,onednn,src-onednn' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src onednn src-onednn[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_for-intel-mlperf-inference-v3.1-bert` - - ENV variables: - - CM_CONDA_ENV: `yes` - - CM_FOR_INTEL_MLPERF_INFERENCE: `yes` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - * **`_repo.https://github.com/oneapi-src/oneDNN`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/oneapi-src/oneDNN` - -
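-
-  For example, the build can be pinned to a release tag (the tag value is illustrative):
-
-  ```bash
-  # build oneDNN at a pinned tag; _tag.# maps the suffix into CM_GIT_CHECKOUT_TAG
-  cm run script --tags=install,get,src,from.src,onednn,src-onednn,_tag.v3.3
-  ```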
- - - ##### Default variations - - `_repo.https://github.com/oneapi-src/oneDNN` - -#### Native script being run -=== "Linux/macOS" - * [run-intel-mlperf-inference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onednn-from-src/run-intel-mlperf-inference.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src onednn src-onednn [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md b/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md deleted file mode 100644 index 011956f1d..000000000 --- a/docs/scripts/Compiler-automation/install-onnxruntime-from-src/index.md +++ /dev/null @@ -1,125 +0,0 @@ -# Build onnxruntime from sources -Automatically generated README for this automation recipe: **install-onnxruntime-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onnxruntime-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src onnxruntime src-onnxruntime" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src onnxruntime src-onnxruntime [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,onnxruntime,src-onnxruntime' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src onnxruntime src-onnxruntime[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_cuda` - - ENV variables: - - CM_ONNXRUNTIME_GPU: `yes` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * **`_repo.https://github.com/Microsoft/onnxruntime`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/Microsoft/onnxruntime` - -
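-
-  A minimal sketch of a GPU-enabled build using the variations above:
-
-  ```bash
-  # build onnxruntime with CUDA support from the default repository
-  cm run script --tags=install,get,src,from.src,onnxruntime,src-onnxruntime,_cuda
-  ```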
- - - ##### Default variations - - `_repo.https://github.com/Microsoft/onnxruntime` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-onnxruntime-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src onnxruntime src-onnxruntime [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md b/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md deleted file mode 100644 index 4c7c18512..000000000 --- a/docs/scripts/Compiler-automation/install-pytorch-from-src/index.md +++ /dev/null @@ -1,143 +0,0 @@ -# Build pytorch from sources -Automatically generated README for this automation recipe: **install-pytorch-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src pytorch src-pytorch" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,pytorch,src-pytorch[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src pytorch src-pytorch [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,pytorch,src-pytorch' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src pytorch src-pytorch[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_cuda` - - ENV variables: - - CUDA_HOME: `<<>>` - - CUDNN_LIBRARY_PATH: `<<>>` - - CUDNN_INCLUDE_PATH: `<<>>` - - CUDA_NVCC_EXECUTABLE: `<<>>` - - USE_CUDA: `1` - - USE_CUDNN: `1` - - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper` - - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1` - * `_for-intel-mlperf-inference-v3.1-bert` - - ENV variables: - - CM_CONDA_ENV: `yes` - - CM_MLPERF_INFERENCE_INTEL: `yes` - - USE_CUDA: `0` - * `_for-nvidia-mlperf-inference-v3.1` - * `_for-nvidia-mlperf-inference-v4.0` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - * **`_repo.https://github.com/pytorch/pytorch`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/pytorch/pytorch` - -
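-
-  As an example, a CUDA-enabled build pinned to a release tag might look like this (the tag is illustrative):
-
-  ```bash
-  # build pytorch from source with CUDA/cuDNN enabled
-  cm run script --tags=install,get,src,from.src,pytorch,src-pytorch,_cuda,_tag.v2.2.0
-  ```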
- - - ##### Default variations - - `_repo.https://github.com/pytorch/pytorch` - -#### Native script being run -=== "Linux/macOS" - * [run-intel-mlperf-inference-v3_1.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/run-intel-mlperf-inference-v3_1.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src pytorch src-pytorch [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md b/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md deleted file mode 100644 index 99cb8893f..000000000 --- a/docs/scripts/Compiler-automation/install-pytorch-kineto-from-src/index.md +++ /dev/null @@ -1,135 +0,0 @@ -# Build pytorch kineto from sources -Automatically generated README for this automation recipe: **install-pytorch-kineto-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-kineto-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src pytorch-kineto kineto src-pytorch-kineto[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_cuda` - - ENV variables: - - CUDA_HOME: `<<>>` - - CUDA_NVCC_EXECUTABLE: `<<>>` - - CUDNN_INCLUDE_PATH: `<<>>` - - CUDNN_LIBRARY_PATH: `<<>>` - - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper` - - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1` - - USE_CUDA: `1` - - USE_CUDNN: `1` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - * **`_repo.https://github.com/pytorch/kineto`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/pytorch/kineto` - -
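-
-  For instance (the commit SHA is illustrative, not a default):
-
-  ```bash
-  # build kineto at a pinned commit; _sha.# maps the suffix into CM_GIT_CHECKOUT_SHA
-  cm run script --tags=install,get,src,from.src,pytorch-kineto,kineto,src-pytorch-kineto,_sha.1234abcd
-  ```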
- - - ##### Default variations - - `_repo.https://github.com/pytorch/kineto` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-pytorch-kineto-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src pytorch-kineto kineto src-pytorch-kineto [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md b/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md deleted file mode 100644 index 296969afb..000000000 --- a/docs/scripts/Compiler-automation/install-torchvision-from-src/index.md +++ /dev/null @@ -1,137 +0,0 @@ -# Build pytorchvision from sources -Automatically generated README for this automation recipe: **install-torchvision-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-torchvision-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src pytorchvision torchvision src-pytorchvision" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src pytorchvision torchvision src-pytorchvision[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_cuda` - - ENV variables: - - CUDA_HOME: `<<>>` - - CUDA_NVCC_EXECUTABLE: `<<>>` - - CUDNN_INCLUDE_PATH: `<<>>` - - CUDNN_LIBRARY_PATH: `<<>>` - - USE_CUDA: `1` - - USE_CUDNN: `1` - - TORCH_CUDA_ARCH_LIST: `Ampere Ada Hopper` - - TORCH_CXX_FLAGS: `-D_GLIBCXX_USE_CXX11_ABI=1` - * `_for-nvidia-mlperf-inference-v3.1` - * `_for-nvidia-mlperf-inference-v4.0` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - * **`_repo.https://github.com/pytorch/vision`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/pytorch/vision` - -
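-
-  A sketch of a CUDA-enabled torchvision build (the tag is illustrative):
-
-  ```bash
-  # build torchvision against the CUDA toolchain detected by CM
-  cm run script --tags=install,get,src,from.src,pytorchvision,torchvision,src-pytorchvision,_cuda,_tag.v0.16.2
-  ```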
- - - ##### Default variations - - `_repo.https://github.com/pytorch/vision` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-torchvision-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src pytorchvision torchvision src-pytorchvision [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md b/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md deleted file mode 100644 index 2b681138d..000000000 --- a/docs/scripts/Compiler-automation/install-tpp-pytorch-extension/index.md +++ /dev/null @@ -1,128 +0,0 @@ -# Build TPP-PEX from sources -Automatically generated README for this automation recipe: **install-tpp-pytorch-extension** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tpp-pytorch-extension/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install get src from.src tpp-pex src-tpp-pex" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install get src from.src tpp-pex src-tpp-pex [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,get,src,from.src,tpp-pex,src-tpp-pex' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install get src from.src tpp-pex src-tpp-pex[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_branch.#` - - ENV variables: - - CM_GIT_CHECKOUT: `#` - * `_for-intel-mlperf-inference-v3.1-gptj` - - ENV variables: - - CM_CONDA_ENV: `yes` - * `_sha.#` - - ENV variables: - - CM_GIT_CHECKOUT_SHA: `#` - * `_tag.#` - - ENV variables: - - CM_GIT_CHECKOUT_TAG: `#` - -
- - - * Group "**repo**" -
- Click here to expand this section. - - * `_repo.#` - - ENV variables: - - CM_GIT_URL: `#` - * **`_repo.https://github.com/libxsmm/tpp-pytorch-extension`** (default) - - ENV variables: - - CM_GIT_URL: `https://github.com/libxsmm/tpp-pytorch-extension` - -
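-
-  For example, the Intel MLPerf inference variation above can be selected directly:
-
-  ```bash
-  # build TPP-PEX inside a conda environment for the GPT-J workload
-  cm run script --tags=install,get,src,from.src,tpp-pex,src-tpp-pex,_for-intel-mlperf-inference-v3.1-gptj
-  ```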
- - - ##### Default variations - - `_repo.https://github.com/libxsmm/tpp-pytorch-extension` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-tpp-pytorch-extension/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install get src from.src tpp-pex src-tpp-pex [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Compiler-automation/install-transformers-from-src/index.md b/docs/scripts/Compiler-automation/install-transformers-from-src/index.md deleted file mode 100644 index 78b59b731..000000000 --- a/docs/scripts/Compiler-automation/install-transformers-from-src/index.md +++ /dev/null @@ -1,128 +0,0 @@ -# Build transformers from sources -Automatically generated README for this automation recipe: **install-transformers-from-src** - -Category: **[Compiler automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-transformers-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src from.src transformers src-transformers" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,from.src,transformers,src-transformers[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src from.src transformers src-transformers [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,from.src,transformers,src-transformers' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src from.src transformers src-transformers[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_branch.#`
-    - ENV variables:
-      - CM_GIT_CHECKOUT: `#`
-  * `_for-intel-mlperf-inference-v3.1-bert`
-    - ENV variables:
-      - CM_CONDA_ENV: `yes`
-  * `_sha.#`
-    - ENV variables:
-      - CM_GIT_CHECKOUT_SHA: `#`
-  * `_tag.#`
-    - ENV variables:
-      - CM_GIT_CHECKOUT_TAG: `#`
-
-
-
-
-  * Group "**repo**"
-
- Click here to expand this section.
-
-  * `_repo.#`
-    - ENV variables:
-      - CM_GIT_URL: `#`
-  * **`_repo.https://github.com/pytorch/pytorch`** (default)
-    - ENV variables:
-      - CM_GIT_URL: `https://github.com/huggingface/transformers`
-
-
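A sketch of redirecting the checkout through the `_repo.#` variation above, which routes the URL after the dot into `CM_GIT_URL`; the fork URL below is hypothetical:

```python
import cmind

# `_repo.#` overrides the default repository URL; replace the example
# fork with a real one before running.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'install,src,from.src,transformers,src-transformers,_repo.https://github.com/myorg/transformers',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```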
- - - ##### Default variations - - `_repo.https://github.com/pytorch/pytorch` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-transformers-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src from.src transformers src-transformers [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Dashboard-automation/index.md b/docs/scripts/Dashboard-automation/index.md deleted file mode 100644 index 855c981b1..000000000 --- a/docs/scripts/Dashboard-automation/index.md +++ /dev/null @@ -1 +0,0 @@ -* [publish-results-to-dashboard](publish-results-to-dashboard/index.md) diff --git a/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md b/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md deleted file mode 100644 index e496c921b..000000000 --- a/docs/scripts/Dashboard-automation/publish-results-to-dashboard/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# publish-results-to-dashboard -Automatically generated README for this automation recipe: **publish-results-to-dashboard** - -Category: **[Dashboard automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "publish-results dashboard" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=publish-results,dashboard - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "publish-results dashboard " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'publish-results,dashboard' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "publish-results dashboard" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/publish-results-to-dashboard/run.bat) -___ -#### Script output -```bash -cmr "publish-results dashboard " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md deleted file mode 100644 index 916d64295..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-android-sdk/index.md +++ /dev/null @@ -1,109 +0,0 @@ -# get-android-sdk -Automatically generated README for this automation recipe: **get-android-sdk** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-android-sdk/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-android-sdk/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get android sdk android-sdk" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,android,sdk,android-sdk [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get android sdk android-sdk " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,android,sdk,android-sdk' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get android sdk android-sdk" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--android_cmake_version=value` → `CM_ANDROID_CMAKE_VERSION=value` - * `--android_ndk_version=value` → `CM_ANDROID_NDK_VERSION=value` - * `--android_version=value` → `CM_ANDROID_VERSION=value` - * `--build_tools_version=value` → `CM_ANDROID_BUILD_TOOLS_VERSION=value` - * `--cmdline_tools_version=value` → `CM_ANDROID_CMDLINE_TOOLS_VERSION=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_ANDROID_BUILD_TOOLS_VERSION: `29.0.3` - * CM_ANDROID_CMAKE_VERSION: `3.6.4111459` - * CM_ANDROID_CMDLINE_TOOLS_URL: `https://dl.google.com/android/repository/commandlinetools-${CM_ANDROID_CMDLINE_TOOLS_OS}-${CM_ANDROID_CMDLINE_TOOLS_VERSION}_latest.zip` - * CM_ANDROID_CMDLINE_TOOLS_VERSION: `9123335` - * CM_ANDROID_NDK_VERSION: `21.3.6528147` - * CM_ANDROID_VERSION: `30` - - - -___ -#### Script output -```bash -cmr "get android sdk android-sdk " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md deleted file mode 100644 index 020185dd7..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-aria2/index.md +++ /dev/null @@ -1,97 +0,0 @@ -# get-aria2 -Automatically generated README for this automation recipe: **get-aria2** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get aria2 get-aria2" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,aria2,get-aria2 [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get aria2 get-aria2 " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,aria2,get-aria2' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get aria2 get-aria2" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--install=value` → `CM_FORCE_INSTALL=value` - * `--src=value` → `CM_ARIA2_BUILD_FROM_SRC=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-aria2/run.bat) -___ -#### Script output -```bash -cmr "get aria2 get-aria2 " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md deleted file mode 100644 index b891263a8..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-bazel/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# get-bazel -Automatically generated README for this automation recipe: **get-bazel** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get bazel get-bazel" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,bazel,get-bazel - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get bazel get-bazel " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,bazel,get-bazel' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get bazel get-bazel" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-bazel/run.bat) -___ -#### Script output -```bash -cmr "get bazel get-bazel " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md deleted file mode 100644 index 565ded732..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-blis/index.md +++ /dev/null @@ -1,110 +0,0 @@ -# get-blis -Automatically generated README for this automation recipe: **get-blis** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get lib blis" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,lib,blis[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get lib blis [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,lib,blis' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get lib blis[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**source**" -
- Click here to expand this section.
-
-  * `_amd`
-  * **`_flame`** (default)
-
-
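Group variations are mutually exclusive, so selecting `_amd` replaces the default `_flame` source. A sketch, assuming `cmind` is installed:

```python
import cmind

# Pick the AMD BLIS source instead of the default `_flame` one;
# only one variation from the "source" group can be active.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,lib,blis,_amd',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```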
- - - ##### Default variations - - `_flame` -#### Versions -Default version: `master` - -* `0.9.0` -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-blis/run.bat) -___ -#### Script output -```bash -cmr "get lib blis [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md deleted file mode 100644 index 4e31f81c9..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-brew/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-brew -Automatically generated README for this automation recipe: **get-brew** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-brew/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get brew" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,brew - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get brew " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,brew' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get brew" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-brew/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get brew " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md deleted file mode 100644 index 1b1f97f9c..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmake/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-cmake -Automatically generated README for this automation recipe: **get-cmake** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cmake get-cmake" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cmake,get-cmake - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cmake get-cmake " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cmake,get-cmake' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cmake get-cmake" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmake/run.bat) -___ -#### Script output -```bash -cmr "get cmake get-cmake " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md deleted file mode 100644 index 21ab4045c..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-cmsis_5/index.md +++ /dev/null @@ -1,123 +0,0 @@ -# get-cmsis_5 -Automatically generated README for this automation recipe: **get-cmsis_5** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get cmsis cmsis_5 arm-software" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,cmsis,cmsis_5,arm-software[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get cmsis cmsis_5 arm-software [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,cmsis,cmsis_5,arm-software' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get cmsis cmsis_5 arm-software[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_recurse-submodules`
-    - ENV variables:
-      - CM_GIT_RECURSE_SUBMODULES: `--recurse-submodules`
-  * `_short-history`
-    - ENV variables:
-      - CM_GIT_DEPTH: `--depth 10`
-
-
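No-group variations can be combined freely; a sketch of a shallow, submodule-aware CMSIS_5 checkout using both variations above (assuming `cmind` is installed):

```python
import cmind

# Combine `_recurse-submodules` (git submodules) with `_short-history`
# (clone depth 10) in a single run.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,cmsis,cmsis_5,arm-software,_recurse-submodules,_short-history',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```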
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_DEPTH: `` - * CM_GIT_PATCH: `no` - * CM_GIT_URL: `https://github.com/ARM-software/CMSIS_5.git` - - -#### Versions -Default version: `custom` - -* `custom` -* `develop` -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-cmsis_5/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get cmsis cmsis_5 arm-software [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md deleted file mode 100644 index 66e6de1a9..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-docker/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-docker -Automatically generated README for this automation recipe: **get-docker** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-docker/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get install docker engine" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,install,docker,engine - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get install docker engine " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,install,docker,engine' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get install docker engine" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-docker/run-ubuntu.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get install docker engine " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md deleted file mode 100644 index 72bcf7044..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-generic-sys-util/index.md +++ /dev/null @@ -1,214 +0,0 @@ -# get-generic-sys-util -Automatically generated README for this automation recipe: **get-generic-sys-util** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-sys-util/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get sys-util generic generic-sys-util" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,sys-util,generic,generic-sys-util[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get sys-util generic generic-sys-util [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,sys-util,generic,generic-sys-util' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get sys-util generic generic-sys-util[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_g++-12`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `g++12`
-  * `_gflags-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `gflags-dev`
-  * `_git-lfs`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `git-lfs`
-  * `_glog-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `glog-dev`
-  * `_libboost-all-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libboost-all-dev`
-  * `_libbz2-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libbz2_dev`
-  * `_libev-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libev_dev`
-  * `_libffi-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libffi_dev`
-  * `_libffi7`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libffi7`
-  * `_libgdbm-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libgdbm_dev`
-  * `_libgmock-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libgmock-dev`
-  * `_liblzma-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `liblzma_dev`
-  * `_libmpfr-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libmpfr-dev`
-  * `_libncurses-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libncurses_dev`
-  * `_libnuma-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libnuma-dev`
-  * `_libpci-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libpci-dev`
-  * `_libre2-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libre2-dev`
-  * `_libreadline-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libreadline_dev`
-  * `_libsqlite3-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libsqlite3_dev`
-  * `_libssl-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libssl_dev`
-  * `_libudev-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `libudev-dev`
-  * `_ninja-build`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `ninja-build`
-  * `_nlohmann-json3-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `nlohmann_json3_dev`
-  * `_ntpdate`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `ntpdate`
-  * `_numactl`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `numactl`
-  * `_nvidia-cuda-toolkit`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `nvidia-cuda-toolkit`
-  * `_rapidjson-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `rapidjson-dev`
-  * `_rsync`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `rsync`
-  * `_screen`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `screen`
-  * `_sox`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `sox`
-  * `_tk-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `tk_dev`
-  * `_transmission`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `transmission`
-  * `_wget`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `wget`
-  * `_zlib`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `zlib`
-  * `_zlib1g-dev`
-    - ENV variables:
-      - CM_SYS_UTIL_NAME: `zlib1g_dev`
-
-
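Each variation above only sets `CM_SYS_UTIL_NAME`, which the run script passes to the host package manager; a sketch for `_rsync` (assuming `cmind` is installed):

```python
import cmind

# Install rsync through the detected package manager (apt, yum, ...)
# by selecting the `_rsync` variation.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,sys-util,generic,generic-sys-util,_rsync',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```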
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_CLEAN_DIRS: `bin` - * CM_SUDO: `sudo` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-sys-util/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get sys-util generic generic-sys-util [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md deleted file mode 100644 index 92b5250f1..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-google-test/index.md +++ /dev/null @@ -1,89 +0,0 @@ -# get-google-test -Automatically generated README for this automation recipe: **get-google-test** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-test/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get google-test googletest gtest test google" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,google-test,googletest,gtest,test,google - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get google-test googletest gtest test google " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,google-test,googletest,gtest,test,google' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get google-test googletest gtest test google" - ``` -___ - -#### Versions -Default version: `1.14.0` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-google-test/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get google-test googletest gtest test google " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md deleted file mode 100644 index f0c5f5ac6..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-java/index.md +++ /dev/null @@ -1,124 +0,0 @@ -# get-java -Automatically generated README for this automation recipe: **get-java** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get java" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,java[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get java [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,java' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get java[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_install`
-    - ENV variables:
-      - CM_JAVA_PREBUILT_INSTALL: `on`
-
-
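The `_install` variation and the `--install` flag mapped below set the same key; a sketch using the variation form (assuming `cmind` is installed):

```python
import cmind

# `_install` sets CM_JAVA_PREBUILT_INSTALL=on, requesting a prebuilt JDK
# download rather than only detecting an existing one.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,java,_install',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```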
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--install=value` → `CM_JAVA_PREBUILT_INSTALL=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_JAVA_PREBUILT_VERSION: `19` - * CM_JAVA_PREBUILT_BUILD: `36` - * CM_JAVA_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVA_PREBUILT_VERSION}/ri/` - * CM_JAVA_PREBUILT_FILENAME: `openjdk-${CM_JAVA_PREBUILT_VERSION}+${CM_JAVA_PREBUILT_BUILD}_${CM_JAVA_PREBUILT_HOST_OS}-x64_bin` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-java/run.bat) -___ -#### Script output -```bash -cmr "get java [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md deleted file mode 100644 index fae70fe46..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-javac/index.md +++ /dev/null @@ -1,124 +0,0 @@ -# get-javac -Automatically generated README for this automation recipe: **get-javac** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get javac" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,javac[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get javac [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,javac' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get javac[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_install`
-    - ENV variables:
-      - CM_JAVAC_PREBUILT_INSTALL: `on`
-
-
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--install=value` → `CM_JAVAC_PREBUILT_INSTALL=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_JAVAC_PREBUILT_VERSION: `19` - * CM_JAVAC_PREBUILT_BUILD: `36` - * CM_JAVAC_PREBUILT_URL: `https://download.java.net/openjdk/jdk${CM_JAVAC_PREBUILT_VERSION}/ri/` - * CM_JAVAC_PREBUILT_FILENAME: `openjdk-${CM_JAVAC_PREBUILT_VERSION}+${CM_JAVAC_PREBUILT_BUILD}_${CM_JAVAC_PREBUILT_HOST_OS}-x64_bin` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-javac/run.bat) -___ -#### Script output -```bash -cmr "get javac [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md deleted file mode 100644 index 99b740e1f..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-armnn/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# get-lib-armnn -Automatically generated README for this automation recipe: **get-lib-armnn** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-armnn/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get lib-armnn lib armnn" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,lib-armnn,lib,armnn - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get lib-armnn lib armnn " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,lib-armnn,lib,armnn' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get lib-armnn lib armnn" - ``` -___ - -#### Versions -Default version: `23.11` - -* `22.11` -* `23.05` -* `23.11` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-armnn/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get lib-armnn lib armnn " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md deleted file mode 100644 index b0860ce56..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-dnnl/index.md +++ /dev/null @@ -1,91 +0,0 @@ -# get-lib-dnnl -Automatically generated README for this automation recipe: **get-lib-dnnl** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-dnnl/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get lib-dnnl lib dnnl" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,lib-dnnl,lib,dnnl - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get lib-dnnl lib dnnl " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,lib-dnnl,lib,dnnl' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get lib-dnnl lib dnnl" - ``` -___ - -#### Versions -Default version: `dev` - -* `2.2.4` -* `dev` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-dnnl/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get lib-dnnl lib dnnl " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md deleted file mode 100644 index 5786390ad..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-protobuf/index.md +++ /dev/null @@ -1,107 +0,0 @@ -# get-lib-protobuf -Automatically generated README for this automation recipe: **get-lib-protobuf** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-protobuf/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get google-protobuf protobuf lib lib-protobuf google" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,google-protobuf,protobuf,lib,lib-protobuf,google[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get google-protobuf protobuf lib lib-protobuf google [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,google-protobuf,protobuf,lib,lib-protobuf,google' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get google-protobuf protobuf lib lib-protobuf google[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_branch.#`
-    - ENV variables:
-      - CM_TMP_GIT_CHECKOUT: `#`
-  * `_tag.#`
-    - ENV variables:
-      - CM_GIT_CHECKOUT_TAG: `#`
-
-
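A sketch of pinning protobuf to a tag with `_tag.#`; the tag `v1.13.0` mirrors the default version listed below but is an assumption — substitute a tag that exists in the source repository:

```python
import cmind

# `_tag.#` maps the value after the dot into CM_GIT_CHECKOUT_TAG.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,google-protobuf,protobuf,lib,lib-protobuf,google,_tag.v1.13.0',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```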
- -#### Versions -Default version: `1.13.0` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-protobuf/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get google-protobuf protobuf lib lib-protobuf google [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md deleted file mode 100644 index 012b061ee..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-lib-qaic-api/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# get-lib-qaic-api -Automatically generated README for this automation recipe: **get-lib-qaic-api** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-qaic-api/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get api lib-qaic-api lib qaic" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,api,lib-qaic-api,lib,qaic - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get api lib-qaic-api lib qaic " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,api,lib-qaic-api,lib,qaic' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get api lib-qaic-api lib qaic" - ``` -___ - -#### Versions -Default version: `master` - -* `master` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-lib-qaic-api/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get api lib-qaic-api lib qaic " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md deleted file mode 100644 index 11bd4a211..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-nvidia-docker/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-nvidia-docker -Automatically generated README for this automation recipe: **get-nvidia-docker** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-docker/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,install,nvidia,nvidia-container-toolkit,nvidia-docker,engine' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get install nvidia nvidia-container-toolkit nvidia-docker engine" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-docker/run-ubuntu.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get install nvidia nvidia-container-toolkit nvidia-docker engine " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md deleted file mode 100644 index 9eda1419e..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-openssl/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# get-openssl -Automatically generated README for this automation recipe: **get-openssl** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get openssl lib lib-openssl" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,openssl,lib,lib-openssl - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get openssl lib lib-openssl " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,openssl,lib,lib-openssl' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get openssl lib lib-openssl" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-openssl/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get openssl lib lib-openssl " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md deleted file mode 100644 index bf494897c..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-rclone/index.md +++ /dev/null @@ -1,107 +0,0 @@ -# get-rclone -Automatically generated README for this automation recipe: **get-rclone** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get rclone" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,rclone[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get rclone [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,rclone' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get rclone[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section.
-
-  * `_gdrive`
-    - ENV variables:
-      - CM_RCLONE_GDRIVE: `yes`
-  * `_system`
-    - ENV variables:
-      - CM_RCLONE_SYSTEM: `yes`
-
-
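A sketch of enabling the Google Drive support flagged by `_gdrive` above (assuming `cmind` is installed):

```python
import cmind

# `_gdrive` sets CM_RCLONE_GDRIVE=yes; `_system` (not used here) would
# request a system-wide rclone instead.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,rclone,_gdrive',
                  'out': 'con'})
if r['return'] > 0:
    print(r['error'])
```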
- -#### Versions -Default version: `1.65.2` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-rclone/run.bat) -___ -#### Script output -```bash -cmr "get rclone [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md deleted file mode 100644 index 558aa7601..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-cm/index.md +++ /dev/null @@ -1,115 +0,0 @@ -# get-sys-utils-cm -Automatically generated README for this automation recipe: **get-sys-utils-cm** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get sys-utils-cm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,sys-utils-cm[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get sys-utils-cm [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,sys-utils-cm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get sys-utils-cm[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_user`
-        - ENV variables:
-            - CM_PYTHON_PIP_USER: `--user`
-
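A hedged sketch of the same selection from Python; passing `skip` as a top-level input key is an assumption inferred from the flag mapping below, not something this README states explicitly:

```python
import cmind

# Select the `_user` variation (CM_PYTHON_PIP_USER=--user); `skip`
# is assumed to mirror the `--skip` CLI flag mapped below
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,sys-utils-cm,_user',
                  'skip': 'yes',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```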
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--skip=value` → `CM_SKIP_SYS_UTILS=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run-arch.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-arch.sh) - * [run-debian.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-debian.sh) - * [run-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-macos.sh) - * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-rhel.sh) - * [run-sles.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-sles.sh) - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-cm/run-ubuntu.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get sys-utils-cm [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md deleted file mode 100644 index 7166bf7a2..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-sys-utils-min/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# get-sys-utils-min -Automatically generated README for this automation recipe: **get-sys-utils-min** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-sys-utils-min/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get sys-utils-min" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,sys-utils-min - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get sys-utils-min " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,sys-utils-min' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get sys-utils-min" - ``` -___ - - -___ -#### Script output -```bash -cmr "get sys-utils-min " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md deleted file mode 100644 index 147e88815..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-xilinx-sdk/index.md +++ /dev/null @@ -1,99 +0,0 @@ -# get-xilinx-sdk -Automatically generated README for this automation recipe: **get-xilinx-sdk** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-xilinx-sdk/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get xilinx sdk" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,xilinx,sdk [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get xilinx sdk " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,xilinx,sdk' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get xilinx sdk" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `CM_XILINX_SDK_FILE_PATH=value` - - - -#### Versions -Default version: `2019.1` - -* `2019.1` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-xilinx-sdk/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get xilinx sdk " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md deleted file mode 100644 index 58d04192e..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/get-zendnn/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-zendnn -Automatically generated README for this automation recipe: **get-zendnn** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get zendnn amd from.src" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,zendnn,amd,from.src - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get zendnn amd from.src " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,zendnn,amd,from.src' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get zendnn amd from.src" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zendnn/run.bat) -___ -#### Script output -```bash -cmr "get zendnn amd from.src " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md deleted file mode 100644 index 2f5ec2c54..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/index.md +++ /dev/null @@ -1,30 +0,0 @@ -* [get-android-sdk](get-android-sdk/index.md) -* [get-aria2](get-aria2/index.md) -* [get-bazel](get-bazel/index.md) -* [get-blis](get-blis/index.md) -* [get-brew](get-brew/index.md) -* [get-cmake](get-cmake/index.md) -* [get-cmsis_5](get-cmsis_5/index.md) -* [get-docker](get-docker/index.md) -* [get-generic-sys-util](get-generic-sys-util/index.md) -* [get-google-test](get-google-test/index.md) -* [get-java](get-java/index.md) -* [get-javac](get-javac/index.md) -* [get-lib-armnn](get-lib-armnn/index.md) -* [get-lib-dnnl](get-lib-dnnl/index.md) -* [get-lib-protobuf](get-lib-protobuf/index.md) -* [get-lib-qaic-api](get-lib-qaic-api/index.md) -* [get-nvidia-docker](get-nvidia-docker/index.md) -* [get-openssl](get-openssl/index.md) -* [get-rclone](get-rclone/index.md) -* [get-sys-utils-cm](get-sys-utils-cm/index.md) -* [get-sys-utils-min](get-sys-utils-min/index.md) -* [get-xilinx-sdk](get-xilinx-sdk/index.md) -* [get-zendnn](get-zendnn/index.md) -* [install-bazel](install-bazel/index.md) -* [install-cmake-prebuilt](install-cmake-prebuilt/index.md) -* [install-gflags](install-gflags/index.md) -* [install-github-cli](install-github-cli/index.md) -* [install-intel-neural-speed-from-src](install-intel-neural-speed-from-src/index.md) -* [install-numactl-from-src](install-numactl-from-src/index.md) -* [install-openssl](install-openssl/index.md) diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md deleted file mode 100644 index d9dee3a52..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-bazel/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# install-bazel -Automatically generated README 
for this automation recipe: **install-bazel** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install script bazel" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,script,bazel - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install script bazel " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,script,bazel' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install script bazel" - ``` -___ - -#### Versions -Default version: `7.0.2` - - -#### Native script being run -=== "Linux/macOS" - * [run-aarch64.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run-aarch64.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-bazel/run.bat) -___ -#### Script output -```bash -cmr "install script bazel " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md deleted file mode 100644 index b02d49ed1..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-cmake-prebuilt/index.md +++ /dev/null @@ -1,89 +0,0 @@ -# install-cmake-prebuilt -Automatically generated README for this automation recipe: **install-cmake-prebuilt** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cmake-prebuilt/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,prebuilt,cmake,prebuilt-cmake,install-prebuilt-cmake' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake" - ``` -___ - -#### Versions -Default version: `3.28.3` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-cmake-prebuilt/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install prebuilt cmake prebuilt-cmake install-prebuilt-cmake " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md deleted file mode 100644 index adc3b0922..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-gflags/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# install-gflags -Automatically generated README for this automation recipe: **install-gflags** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gflags/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src get gflags" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,get,gflags - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src get gflags " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,get,gflags' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src get gflags" - ``` -___ - -#### Versions -Default version: `2.2.2` - -* `2.2.2` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-gflags/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src get gflags " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md deleted file mode 100644 index 36276fc96..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-github-cli/index.md +++ /dev/null @@ -1,88 +0,0 @@ -# install-github-cli -Automatically generated README for this automation recipe: **install-github-cli** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install gh github cli github-cli" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,gh,github,cli,github-cli - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install gh github cli github-cli " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,gh,github,cli,github-cli' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install gh github cli github-cli" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run-macos.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run-macos.sh) - * [run-rhel.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run-rhel.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-github-cli/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install gh github cli github-cli " -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md deleted file mode 100644 index 36266b661..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-intel-neural-speed-from-src/index.md +++ /dev/null @@ -1,126 +0,0 @@ -# Build Intel Neural Speed from sources -Automatically generated README for this automation recipe: **install-intel-neural-speed-from-src** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-intel-neural-speed-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src from.src neural-speed intel-neural-speed" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,from.src,neural-speed,intel-neural-speed[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src from.src neural-speed intel-neural-speed [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,from.src,neural-speed,intel-neural-speed' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src from.src neural-speed intel-neural-speed[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_branch.#`
-        - ENV variables:
-            - CM_GIT_CHECKOUT: `#`
-    * `_for-intel-mlperf-inference-v4.0-gptj`
-    * `_sha.#`
-        - ENV variables:
-            - CM_GIT_CHECKOUT_SHA: `#`
-    * `_tag.#`
-        - ENV variables:
-            - CM_GIT_CHECKOUT_TAG: `#`
-
-
-
-    * Group "**repo**"
-    * `_repo.#`
-        - ENV variables:
-            - CM_GIT_URL: `#`
-    * **`_repo.https://github.com/intel/neural-speed`** (default)
-        - ENV variables:
-            - CM_GIT_URL: `https://github.com/intel/neural-speed`
-
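A short sketch of the `#` wildcard in practice (the branch name `main` is illustrative, not prescribed by this recipe):

```python
import cmind

# `#` in `_branch.#` is a placeholder: the text after `_branch.` is
# copied into CM_GIT_CHECKOUT, so `_branch.main` would check out `main`
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'install,src,from.src,neural-speed,intel-neural-speed,_branch.main',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```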
- - - ##### Default variations - - `_repo.https://github.com/intel/neural-speed` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-intel-neural-speed-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src from.src neural-speed intel-neural-speed [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md deleted file mode 100644 index 6c2808bea..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-numactl-from-src/index.md +++ /dev/null @@ -1,125 +0,0 @@ -# Build numactl from sources -Automatically generated README for this automation recipe: **install-numactl-from-src** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-numactl-from-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src from.src numactl src-numactl" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,from.src,numactl,src-numactl[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src from.src numactl src-numactl [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,from.src,numactl,src-numactl' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src from.src numactl src-numactl[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_branch.#`
-        - ENV variables:
-            - CM_GIT_CHECKOUT: `#`
-    * `_sha.#`
-        - ENV variables:
-            - CM_GIT_CHECKOUT_SHA: `#`
-    * `_tag.#`
-        - ENV variables:
-            - CM_GIT_CHECKOUT_TAG: `#`
-
-
-
-    * Group "**repo**"
-    * `_repo.#`
-        - ENV variables:
-            - CM_GIT_URL: `#`
-    * **`_repo.https://github.com/numactl/numactl`** (default)
-        - ENV variables:
-            - CM_GIT_URL: `https://github.com/numactl/numactl`
-
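A sketch of pinning the checkout while keeping the default repo group selection (the SHA below is purely illustrative):

```python
import cmind

# With no explicit `_repo.<url>` tag the default repo group entry is
# used; `_sha.#` then pins CM_GIT_CHECKOUT_SHA to the given commit
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'install,src,from.src,numactl,src-numactl,_sha.0123456789abcdef',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```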
- - - ##### Default variations - - `_repo.https://github.com/numactl/numactl` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-numactl-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src from.src numactl src-numactl [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md b/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md deleted file mode 100644 index 1e41f8cc6..000000000 --- a/docs/scripts/Detection-or-installation-of-tools-and-artifacts/install-openssl/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# install-openssl -Automatically generated README for this automation recipe: **install-openssl** - -Category: **[Detection or installation of tools and artifacts](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-openssl/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src openssl openssl-lib" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,openssl,openssl-lib - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src openssl openssl-lib " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,openssl,openssl-lib' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src openssl openssl-lib" - ``` -___ - -#### Versions -Default version: `1.1.1` - -* `1.1.1` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-openssl/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src openssl openssl-lib " -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/benchmark-program/index.md b/docs/scripts/DevOps-automation/benchmark-program/index.md deleted file mode 100644 index 0c940eff9..000000000 --- a/docs/scripts/DevOps-automation/benchmark-program/index.md +++ /dev/null @@ -1,114 +0,0 @@ -# benchmark-program -Automatically generated README for this automation recipe: **benchmark-program** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "benchmark program" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=benchmark,program[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "benchmark program [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'benchmark,program' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "benchmark program[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_numactl`
-    * `_numactl-interleave`
-    * `_profile`
-
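A minimal sketch combining a variation with an override of one of the default-environment keys listed in the next section; the `env` input mirrors the documented `--env.KEY=VALUE` mechanism:

```python
import cmind

# Run under numactl and flip CM_ENABLE_PROFILING (default `0`, see the
# "Default environment" section below) via the `env` dictionary
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'benchmark,program,_numactl',
                  'env': {'CM_ENABLE_PROFILING': '1'},
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```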
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_ENABLE_NUMACTL: `0` - * CM_ENABLE_PROFILING: `0` - - - -#### Native script being run -=== "Linux/macOS" - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run-ubuntu.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program/run.bat) -___ -#### Script output -```bash -cmr "benchmark program [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/compile-program/index.md b/docs/scripts/DevOps-automation/compile-program/index.md deleted file mode 100644 index 51e8e7ece..000000000 --- a/docs/scripts/DevOps-automation/compile-program/index.md +++ /dev/null @@ -1,97 +0,0 @@ -# compile-program -Automatically generated README for this automation recipe: **compile-program** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'compile,program,c-program,cpp-program,compile-program,compile-c-program,compile-cpp-program' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program" - ``` -___ - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * SKIP_RECOMPILE: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/compile-program/run.bat) -___ -#### Script output -```bash -cmr "compile program c-program cpp-program compile-program compile-c-program compile-cpp-program " -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/convert-csv-to-md/index.md b/docs/scripts/DevOps-automation/convert-csv-to-md/index.md deleted file mode 100644 index 30ee7342f..000000000 --- a/docs/scripts/DevOps-automation/convert-csv-to-md/index.md +++ /dev/null @@ -1,96 +0,0 @@ -# convert-csv-to-md -Automatically generated README for this automation recipe: **convert-csv-to-md** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "csv-to-md convert to-md from-csv" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=csv-to-md,convert,to-md,from-csv [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "csv-to-md convert to-md from-csv " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'csv-to-md,convert,to-md,from-csv' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "csv-to-md convert to-md from-csv" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--csv_file=value` → `CM_CSV_FILE=value` - * `--md_file=value` → `CM_MD_FILE=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/convert-csv-to-md/run.bat) -___ -#### Script output -```bash -cmr "csv-to-md convert to-md from-csv " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/copy-to-clipboard/index.md b/docs/scripts/DevOps-automation/copy-to-clipboard/index.md deleted file mode 100644 index 3612d0dc5..000000000 --- a/docs/scripts/DevOps-automation/copy-to-clipboard/index.md +++ /dev/null @@ -1,98 +0,0 @@ -# copy-to-clipboard -Automatically generated README for this automation recipe: **copy-to-clipboard** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "copy to clipboard copy-to-clipboard" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=copy,to,clipboard,copy-to-clipboard [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "copy to clipboard copy-to-clipboard " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'copy,to,clipboard,copy-to-clipboard' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "copy to clipboard copy-to-clipboard" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--add_quotes=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value` - * `--q=value` → `CM_COPY_TO_CLIPBOARD_TEXT_ADD_QUOTES=value` - * `--t=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value` - * `--text=value` → `CM_COPY_TO_CLIPBOARD_TEXT=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/copy-to-clipboard/run.bat) -___ -#### Script output -```bash -cmr "copy to clipboard copy-to-clipboard " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/create-conda-env/index.md b/docs/scripts/DevOps-automation/create-conda-env/index.md deleted file mode 100644 index bef5e7517..000000000 --- a/docs/scripts/DevOps-automation/create-conda-env/index.md +++ /dev/null @@ -1,101 +0,0 @@ -# create-conda-env -Automatically generated README for this automation recipe: **create-conda-env** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-conda-env/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "create get env conda-env conda-environment create-conda-environment" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=create,get,env,conda-env,conda-environment,create-conda-environment[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "create get env conda-env conda-environment create-conda-environment [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'create,get,env,conda-env,conda-environment,create-conda-environment' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "create get env conda-env conda-environment create-conda-environment[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_name.#`
-        - ENV variables:
-            - CM_CONDA_ENV_NAME: `#`
-
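A hedged sketch of creating a named environment and inspecting the result; `new_env` as the key carrying the script's exported variables is an assumption, not confirmed by this README:

```python
import cmind

# `_name.cm-test` puts `cm-test` into CM_CONDA_ENV_NAME
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'create,get,env,conda-env,conda-environment,'
                          'create-conda-environment,_name.cm-test',
                  'out': 'con'})

if r['return'] == 0:
    print(r.get('new_env', {}))  # assumed key name for exported env
else:
    print(r['error'])
```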
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-conda-env/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "create get env conda-env conda-environment create-conda-environment [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/create-patch/index.md b/docs/scripts/DevOps-automation/create-patch/index.md deleted file mode 100644 index 05d7c4279..000000000 --- a/docs/scripts/DevOps-automation/create-patch/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# create-patch -Automatically generated README for this automation recipe: **create-patch** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/create-patch/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/create-patch/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "create patch" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=create,patch [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "create patch " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'create,patch' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "create patch" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--exclude=value` → `CM_CREATE_PATCH_EXCLUDE=value` - * `--new=value` → `CM_CREATE_PATCH_NEW=value` - * `--old=value` → `CM_CREATE_PATCH_OLD=value` - - - - -___ -#### Script output -```bash -cmr "create patch " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/detect-sudo/index.md b/docs/scripts/DevOps-automation/detect-sudo/index.md deleted file mode 100644 index 9bb3a47f0..000000000 --- a/docs/scripts/DevOps-automation/detect-sudo/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# detect-sudo -Automatically generated README for this automation recipe: **detect-sudo** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-sudo/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "detect sudo access" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=detect,sudo,access - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "detect sudo access " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'detect,sudo,access' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "detect sudo access" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-sudo/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "detect sudo access " -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/download-and-extract/index.md b/docs/scripts/DevOps-automation/download-and-extract/index.md deleted file mode 100644 index 1bb91aa3f..000000000 --- a/docs/scripts/DevOps-automation/download-and-extract/index.md +++ /dev/null @@ -1,145 +0,0 @@ -# download-and-extract -Automatically generated README for this automation recipe: **download-and-extract** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/download-and-extract/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-and-extract/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "download-and-extract file" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=download-and-extract,file[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "download-and-extract file [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'download-and-extract,file' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "download-and-extract file[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_extract`
-        - ENV variables:
-            - CM_DAE_EXTRACT_DOWNLOADED: `yes`
-    * `_keep`
-        - ENV variables:
-            - CM_EXTRACT_REMOVE_EXTRACTED: `no`
-    * `_no-remove-extracted`
-        - ENV variables:
-            - CM_EXTRACT_REMOVE_EXTRACTED: `no`
-    * `_url.#`
-        - ENV variables:
-            - CM_DAE_URL: `#`
-
-
-
-    * Group "**download-tool**"
-    * **`_cmutil`** (default)
-    * `_curl`
-    * `_gdown`
-    * `_rclone`
-    * `_torrent`
-        - ENV variables:
-            - CM_DAE_DOWNLOAD_USING_TORRENT: `yes`
-            - CM_TORRENT_DOWNLOADED_FILE_NAME: `<<>>`
-            - CM_TORRENT_DOWNLOADED_PATH_ENV_KEY: `CM_DAE_FILEPATH`
-            - CM_TORRENT_WAIT_UNTIL_COMPLETED: `yes`
-    * `_wget`
-
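A sketch combining a download tool from the group above with `_extract` and the `_url.#` wildcard; the URL, the target path, and the programmatic use of the `to` input key (inferred from the flag mapping below) are assumptions:

```python
import cmind

# Download with wget, extract the archive, and place the result under
# an explicit extraction path (both URL and path are placeholders)
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'download-and-extract,file,_wget,_extract,'
                          '_url.https://example.com/archive.tar.gz',
                  'to': '/tmp/extracted',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```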
- - - ##### Default variations - - `_cmutil` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--download_path=value` → `CM_DOWNLOAD_PATH=value` - * `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value` - * `--extract_path=value` → `CM_EXTRACT_PATH=value` - * `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` - * `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` - * `--store=value` → `CM_DOWNLOAD_PATH=value` - * `--to=value` → `CM_EXTRACT_PATH=value` - * `--url=value` → `CM_DAE_URL=value` - * `--verify=value` → `CM_VERIFY_SSL=value` - - - - -___ -#### Script output -```bash -cmr "download-and-extract file [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/download-file/index.md b/docs/scripts/DevOps-automation/download-file/index.md deleted file mode 100644 index 2899d4941..000000000 --- a/docs/scripts/DevOps-automation/download-file/index.md +++ /dev/null @@ -1,156 +0,0 @@ -# download-file -Automatically generated README for this automation recipe: **download-file** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "download file" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=download,file[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "download file [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'download,file' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "download file[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_url.#`
-        - ENV variables:
-            - CM_DOWNLOAD_URL: `#`
-
-
-
-    * Group "**download-tool**"
-    * **`_cmutil`** (default)
-        - ENV variables:
-            - CM_DOWNLOAD_TOOL: `cmutil`
-    * `_curl`
-        - ENV variables:
-            - CM_DOWNLOAD_TOOL: `curl`
-    * `_gdown`
-        - ENV variables:
-            - CM_DOWNLOAD_TOOL: `gdown`
-    * `_rclone`
-        - ENV variables:
-            - CM_DOWNLOAD_TOOL: `rclone`
-    * `_wget`
-        - ENV variables:
-            - CM_DOWNLOAD_TOOL: `wget`
-
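A sketch of a checksum-verified download; the URL and MD5 value are placeholders, and `md5sum` as a programmatic input key is inferred from the flag mapping below:

```python
import cmind

# Fetch with rclone and let the script compare the MD5 checksum
# (CM_DOWNLOAD_CHECKSUM) against the downloaded file
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'download,file,_rclone,_url.https://example.com/data.bin',
                  'md5sum': '0123456789abcdef0123456789abcdef',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```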
- - - ##### Default variations - - `_cmutil` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--download_path=value` → `CM_DOWNLOAD_PATH=value` - * `--from=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` - * `--local_path=value` → `CM_DOWNLOAD_LOCAL_FILE_PATH=value` - * `--md5sum=value` → `CM_DOWNLOAD_CHECKSUM=value` - * `--output_file=value` → `CM_DOWNLOAD_FILENAME=value` - * `--store=value` → `CM_DOWNLOAD_PATH=value` - * `--url=value` → `CM_DOWNLOAD_URL=value` - * `--verify=value` → `CM_VERIFY_SSL=value` - * `--verify_ssl=value` → `CM_VERIFY_SSL=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_RCLONE_COPY_USING: `sync` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/download-file/run.bat) -___ -#### Script output -```bash -cmr "download file [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/download-torrent/index.md b/docs/scripts/DevOps-automation/download-torrent/index.md deleted file mode 100644 index 3d2aecbdb..000000000 --- a/docs/scripts/DevOps-automation/download-torrent/index.md +++ /dev/null @@ -1,120 +0,0 @@ -# download-torrent -Automatically generated README for this automation recipe: **download-torrent** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/download-torrent/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "download torrent download-torrent" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=download,torrent,download-torrent[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "download torrent download-torrent [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'download,torrent,download-torrent' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "download torrent download-torrent[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_torrent.#`
-        - ENV variables:
-            - CM_TORRENT_FILE: `#`
-
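A hedged sketch; the `.torrent` path is illustrative, and `wait` as a programmatic input key is inferred from the flag mapping below:

```python
import cmind

# `_torrent.#` points CM_TORRENT_FILE at a local .torrent file;
# `wait` maps to CM_TORRENT_WAIT_UNTIL_COMPLETED
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'download,torrent,download-torrent,'
                          '_torrent./tmp/dataset.torrent',
                  'wait': 'yes',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```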
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--wait=value` → `CM_TORRENT_WAIT_UNTIL_COMPLETED=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_TORRENT_WAIT_UNTIL_COMPLETED: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/download-torrent/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "download torrent download-torrent [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/extract-file/index.md b/docs/scripts/DevOps-automation/extract-file/index.md deleted file mode 100644 index c58463bb8..000000000 --- a/docs/scripts/DevOps-automation/extract-file/index.md +++ /dev/null @@ -1,120 +0,0 @@ -# extract-file -Automatically generated README for this automation recipe: **extract-file** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "extract file" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=extract,file[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "extract file [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'extract,file' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "extract file[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_keep`
-        - ENV variables:
-            - CM_EXTRACT_REMOVE_EXTRACTED: `no`
-    * `_no-remove-extracted`
-        - ENV variables:
-            - CM_EXTRACT_REMOVE_EXTRACTED: `no`
-    * `_path.#`
-        - ENV variables:
-            - CM_EXTRACT_FILEPATH: `#`
-
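A sketch combining `_keep` with the `_path.#` wildcard; both paths are placeholders, and `to` as a programmatic input key is inferred from the flag mapping below:

```python
import cmind

# Extract a named archive, keep the original file afterwards, and
# direct the output to an explicit extraction path
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'extract,file,_keep,_path./tmp/archive.tar.gz',
                  'to': '/tmp/extracted',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```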
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--extra_folder=value` → `CM_EXTRACT_TO_FOLDER=value` - * `--extract_path=value` → `CM_EXTRACT_PATH=value` - * `--input=value` → `CM_EXTRACT_FILEPATH=value` - * `--to=value` → `CM_EXTRACT_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/extract-file/run.bat) -___ -#### Script output -```bash -cmr "extract file [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/fail/index.md b/docs/scripts/DevOps-automation/fail/index.md deleted file mode 100644 index 811924c37..000000000 --- a/docs/scripts/DevOps-automation/fail/index.md +++ /dev/null @@ -1,96 +0,0 @@ -# fail -Automatically generated README for this automation recipe: **fail** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/fail/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/fail/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "fail filter" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=fail,filter[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "fail filter [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'fail,filter' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "fail filter[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_windows`
-      - ENV variables:
-          - CM_FAIL_WINDOWS: `True`
-
- - -___ -#### Script output -```bash -cmr "fail filter [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/get-conda/index.md b/docs/scripts/DevOps-automation/get-conda/index.md deleted file mode 100644 index 904deffb8..000000000 --- a/docs/scripts/DevOps-automation/get-conda/index.md +++ /dev/null @@ -1,115 +0,0 @@ -# get-conda -Automatically generated README for this automation recipe: **get-conda** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get conda get-conda" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,conda,get-conda[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get conda get-conda [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,conda,get-conda' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get conda get-conda[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_name.#`
-      - ENV variables:
-          - CM_CONDA_PREFIX_NAME: `#`
-
-
-
-  * Group "**conda-python**"
-
-  * `_python-3.#`
-      - ENV variables:
-          - CM_CONDA_PYTHON_VERSION: `3.#`
-  * `_python-3.8`
-      - ENV variables:
-          - CM_CONDA_PYTHON_VERSION: `3.8`
-
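-
-  As a sketch, one might pin a Python version inside a named Conda
-  environment (the environment name `my-env` is hypothetical):
-
-  ```bash
-  cm run script --tags=get,conda,get-conda,_python-3.8,_name.my-env
-  ```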
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-conda/run.bat) -___ -#### Script output -```bash -cmr "get conda get-conda [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/get-git-repo/index.md b/docs/scripts/DevOps-automation/get-git-repo/index.md deleted file mode 100644 index 8108b2915..000000000 --- a/docs/scripts/DevOps-automation/get-git-repo/index.md +++ /dev/null @@ -1,187 +0,0 @@ -# get-git-repo -Automatically generated README for this automation recipe: **get-git-repo** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get git repo repository clone" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,git,repo,repository,clone[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get git repo repository clone [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,git,repo,repository,clone' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get git repo repository clone[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_lfs`
-      - ENV variables:
-          - CM_GIT_REPO_NEEDS_LFS: `yes`
-  * `_no-recurse-submodules`
-      - ENV variables:
-          - CM_GIT_RECURSE_SUBMODULES: ``
-  * `_patch`
-      - ENV variables:
-          - CM_GIT_PATCH: `yes`
-  * `_submodules.#`
-      - ENV variables:
-          - CM_GIT_SUBMODULES: `#`
-
-
-
-  * Group "**checkout**"
-
-  * `_branch.#`
-      - ENV variables:
-          - CM_GIT_BRANCH: `#`
-  * `_sha.#`
-      - ENV variables:
-          - CM_GIT_SHA: `#`
-  * `_tag.#`
-      - ENV variables:
-          - CM_GIT_CHECKOUT_TAG: `#`
-
-
-
-  * Group "**git-history**"
-
-  * `_full-history`
-      - ENV variables:
-          - CM_GIT_DEPTH: ``
-  * **`_short-history`** (default)
-      - ENV variables:
-          - CM_GIT_DEPTH: `--depth 5`
-
-
-
-  * Group "**repo**"
-
-  * `_repo.#`
-      - ENV variables:
-          - CM_GIT_URL: `#`
-
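-
-  As an illustrative sketch, variations from different groups can be
-  combined in a single call (the repository URL and branch are placeholders):
-
-  ```bash
-  cm run script --tags=get,git,repo,_repo.https://github.com/mlcommons/inference,_branch.master,_full-history
-  ```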
- - - ##### Default variations - - `_short-history` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--branch=value` → `CM_GIT_CHECKOUT=value` - * `--depth=value` → `CM_GIT_DEPTH=value` - * `--env_key=value` → `CM_GIT_ENV_KEY=value` - * `--folder=value` → `CM_GIT_CHECKOUT_FOLDER=value` - * `--patch=value` → `CM_GIT_PATCH=value` - * `--pull=value` → `CM_GIT_REPO_PULL=value` - * `--submodules=value` → `CM_GIT_RECURSE_SUBMODULES=value` - * `--update=value` → `CM_GIT_REPO_PULL=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_DEPTH: `--depth 4` - * CM_GIT_CHECKOUT_FOLDER: `repo` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules` - * CM_GIT_URL: `https://github.com/mlcommons/ck.git` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-git-repo/run.bat) -___ -#### Script output -```bash -cmr "get git repo repository clone [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/get-github-cli/index.md b/docs/scripts/DevOps-automation/get-github-cli/index.md deleted file mode 100644 index 06d0a33f0..000000000 --- a/docs/scripts/DevOps-automation/get-github-cli/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-github-cli -Automatically generated README for this automation recipe: **get-github-cli** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get gh gh-cli github cli github-cli" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,gh,gh-cli,github,cli,github-cli - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get gh gh-cli github cli github-cli " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,gh,gh-cli,github,cli,github-cli' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get gh gh-cli github cli github-cli" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-github-cli/run.bat) -___ -#### Script output -```bash -cmr "get gh gh-cli github cli github-cli " -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/index.md b/docs/scripts/DevOps-automation/index.md deleted file mode 100644 index 94dd95d37..000000000 --- a/docs/scripts/DevOps-automation/index.md +++ /dev/null @@ -1,22 +0,0 @@ -* [benchmark-program](benchmark-program/index.md) -* [compile-program](compile-program/index.md) -* [convert-csv-to-md](convert-csv-to-md/index.md) -* [copy-to-clipboard](copy-to-clipboard/index.md) -* [create-conda-env](create-conda-env/index.md) -* [create-patch](create-patch/index.md) -* [detect-sudo](detect-sudo/index.md) -* [download-and-extract](download-and-extract/index.md) -* [download-file](download-file/index.md) -* [download-torrent](download-torrent/index.md) -* [extract-file](extract-file/index.md) -* [fail](fail/index.md) -* [get-conda](get-conda/index.md) -* [get-git-repo](get-git-repo/index.md) -* [get-github-cli](get-github-cli/index.md) -* [pull-git-repo](pull-git-repo/index.md) -* [push-csv-to-spreadsheet](push-csv-to-spreadsheet/index.md) -* [set-device-settings-qaic](set-device-settings-qaic/index.md) -* [set-echo-off-win](set-echo-off-win/index.md) -* [set-performance-mode](set-performance-mode/index.md) -* [set-sqlite-dir](set-sqlite-dir/index.md) -* [tar-my-folder](tar-my-folder/index.md) diff --git a/docs/scripts/DevOps-automation/pull-git-repo/index.md b/docs/scripts/DevOps-automation/pull-git-repo/index.md deleted file mode 100644 index e600e5129..000000000 --- a/docs/scripts/DevOps-automation/pull-git-repo/index.md +++ /dev/null @@ -1,95 +0,0 @@ -# pull-git-repo -Automatically generated README for this automation recipe: **pull-git-repo** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/pull-git-repo/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "pull git repo repository" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=pull,git,repo,repository [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "pull git repo repository " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'pull,git,repo,repository' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "pull git repo repository" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--path=value` → `CM_GIT_CHECKOUT_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/pull-git-repo/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "pull git repo repository " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md b/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md deleted file mode 100644 index 1ea013cb0..000000000 --- a/docs/scripts/DevOps-automation/push-csv-to-spreadsheet/index.md +++ /dev/null @@ -1,107 +0,0 @@ -# push-csv-to-spreadsheet -Automatically generated README for this automation recipe: **push-csv-to-spreadsheet** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/push-csv-to-spreadsheet/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'push,google-spreadsheet,spreadsheet,push-to-google-spreadsheet' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "push google-spreadsheet spreadsheet push-to-google-spreadsheet" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--csv_file=value` → `CM_CSV_FILE_PATH=value` - * `--sheet_name=value` → `CM_GOOGLE_SHEET_NAME=value` - * `--spreadsheet_id=value` → `CM_GOOGLE_SPREADSHEET_ID=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_GOOGLE_SPREADSHEET_ID: `1gMHjXmFmwZR4-waPPyxy5Pc3VARqX3kKUWxkP97Xa6Y` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/push-csv-to-spreadsheet/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "push google-spreadsheet spreadsheet push-to-google-spreadsheet " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md b/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md deleted file mode 100644 index 149675edd..000000000 --- a/docs/scripts/DevOps-automation/set-device-settings-qaic/index.md +++ /dev/null @@ -1,114 +0,0 @@ -# set-device-settings-qaic -Automatically generated README for this automation recipe: **set-device-settings-qaic** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-device-settings-qaic/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "set device qaic ai100 cloud performance power setting mode vc ecc" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'set,device,qaic,ai100,cloud,performance,power,setting,mode,vc,ecc' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "set device qaic ai100 cloud performance power setting mode vc ecc[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_ecc`
-      - ENV variables:
-          - CM_QAIC_ECC: `yes`
-  * `_vc.#`
-      - ENV variables:
-          - CM_QAIC_VC: `#`
-
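-
-  A minimal sketch enabling ECC together with a specific VC value
-  (the value `16` is purely illustrative):
-
-  ```bash
-  cm run script --tags=set,device,qaic,ai100,_ecc,_vc.16
-  ```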
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_QAIC_DEVICES: `0` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-device-settings-qaic/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "set device qaic ai100 cloud performance power setting mode vc ecc [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/set-echo-off-win/index.md b/docs/scripts/DevOps-automation/set-echo-off-win/index.md deleted file mode 100644 index 52ff14aa9..000000000 --- a/docs/scripts/DevOps-automation/set-echo-off-win/index.md +++ /dev/null @@ -1,80 +0,0 @@ -# set-echo-off-win -Automatically generated README for this automation recipe: **set-echo-off-win** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-echo-off-win/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "set echo off win echo-off-win echo-off" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=set,echo,off,win,echo-off-win,echo-off - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "set echo off win echo-off-win echo-off " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'set,echo,off,win,echo-off-win,echo-off' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "set echo off win echo-off-win echo-off" - ``` -___ - - -___ -#### Script output -```bash -cmr "set echo off win echo-off-win echo-off " -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/set-performance-mode/index.md b/docs/scripts/DevOps-automation/set-performance-mode/index.md deleted file mode 100644 index 3a1c6de33..000000000 --- a/docs/scripts/DevOps-automation/set-performance-mode/index.md +++ /dev/null @@ -1,139 +0,0 @@ -# set-performance-mode -Automatically generated README for this automation recipe: **set-performance-mode** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "set system performance power mode" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=set,system,performance,power,mode[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "set system performance power mode [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'set,system,performance,power,mode' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "set system performance power mode[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_reproducibility`
-      - ENV variables:
-          - CM_SET_OS_PERFORMANCE_REPRODUCIBILITY_MODE: `yes`
-
-
-
-  * Group "**device**"
-
-  * **`_cpu`** (default)
-      - ENV variables:
-          - CM_SET_PERFORMANCE_MODE_OF: `cpu`
-
-
-
-  * Group "**performance-mode**"
-
-  * **`_performance`** (default)
-      - ENV variables:
-          - CM_SET_PERFORMANCE_MODE: `performance`
-
-
-
-  * Group "**power**"
-
-  * `_power`
-      - ENV variables:
-          - CM_SET_PERFORMANCE_MODE: `power`
-
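-
-  For example, a sketch selecting the power-saving mode for the CPU
-  (both variations are documented above):
-
-  ```bash
-  cm run script --tags=set,system,performance,power,mode,_cpu,_power
-  ```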
- - - ##### Default variations - - `_cpu,_performance` - -#### Native script being run -=== "Linux/macOS" - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run-ubuntu.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/set-performance-mode/run.bat) -___ -#### Script output -```bash -cmr "set system performance power mode [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/set-sqlite-dir/index.md b/docs/scripts/DevOps-automation/set-sqlite-dir/index.md deleted file mode 100644 index 69229f604..000000000 --- a/docs/scripts/DevOps-automation/set-sqlite-dir/index.md +++ /dev/null @@ -1,95 +0,0 @@ -# set-sqlite-dir -Automatically generated README for this automation recipe: **set-sqlite-dir** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "set sqlite dir sqlite-dir" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=set,sqlite,dir,sqlite-dir [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "set sqlite dir sqlite-dir " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'set,sqlite,dir,sqlite-dir' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "set sqlite dir sqlite-dir" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--path=value` → `CM_SQLITE_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/set-sqlite-dir/run.bat) -___ -#### Script output -```bash -cmr "set sqlite dir sqlite-dir " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/DevOps-automation/tar-my-folder/index.md b/docs/scripts/DevOps-automation/tar-my-folder/index.md deleted file mode 100644 index 91b8bcaf0..000000000 --- a/docs/scripts/DevOps-automation/tar-my-folder/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# tar-my-folder -Automatically generated README for this automation recipe: **tar-my-folder** - -Category: **[DevOps automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/tar-my-folder/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/tar-my-folder/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run tar" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,tar [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run tar " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,tar' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run tar" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input_dir=value` → `CM_TAR_INPUT_DIR=value` - * `--outfile=value` → `CM_TAR_OUTFILE=value` - * `--output_dir=value` → `CM_TAR_OUTPUT_DIR=value` - - - - -___ -#### Script output -```bash -cmr "run tar " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Docker-automation/build-docker-image/index.md b/docs/scripts/Docker-automation/build-docker-image/index.md deleted file mode 100644 index 979bdc8a1..000000000 --- a/docs/scripts/Docker-automation/build-docker-image/index.md +++ /dev/null @@ -1,120 +0,0 @@ -# build-docker-image -Automatically generated README for this automation recipe: **build-docker-image** - -Category: **[Docker automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "build docker image docker-image dockerimage" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=build,docker,image,docker-image,dockerimage [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "build docker image docker-image dockerimage " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'build,docker,image,docker-image,dockerimage' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "build docker image docker-image dockerimage" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--cache=value` → `CM_DOCKER_CACHE=value` - * `--cm_repo=value` → `CM_MLOPS_REPO=value` - * `--docker_os=value` → `CM_DOCKER_OS=value` - * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` - * `--dockerfile=value` → `CM_DOCKERFILE_WITH_PATH=value` - * `--gh_token=value` → `CM_GH_TOKEN=value` - * `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value` - * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` - * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` - * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` - * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` - * `--push_image=value` → `CM_DOCKER_PUSH_IMAGE=value` - * `--real_run=value` → `CM_REAL_RUN=value` - * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DOCKER_IMAGE_REPO: `local` - * CM_DOCKER_IMAGE_TAG: `latest` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/build-docker-image/run.bat) -___ -#### Script output -```bash -cmr "build docker image docker-image dockerimage " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Docker-automation/build-dockerfile/index.md b/docs/scripts/Docker-automation/build-dockerfile/index.md deleted file mode 100644 index 7e4ea3639..000000000 --- a/docs/scripts/Docker-automation/build-dockerfile/index.md +++ /dev/null @@ -1,145 +0,0 @@ -# build-dockerfile -Automatically generated README for this automation recipe: **build-dockerfile** - -Category: **[Docker automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-dockerfile/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "build dockerfile" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=build,dockerfile[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "build dockerfile [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'build,dockerfile' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "build dockerfile[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_slim`
-      - ENV variables:
-          - CM_DOCKER_BUILD_SLIM: `yes`
-
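-
-  As a sketch, one might generate a slim Dockerfile for another script and
-  build the image in one go (script tags and OS values are illustrative;
-  the flags are documented in the input mapping below):
-
-  ```bash
-  cm run script --tags=build,dockerfile,_slim --script_tags=detect,os --docker_os=ubuntu --docker_os_version=22.04 --build=yes
-  ```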
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--build=value` → `CM_BUILD_DOCKER_IMAGE=value` - * `--cache=value` → `CM_DOCKER_CACHE=value` - * `--cm_repo=value` → `CM_MLOPS_REPO=value` - * `--cm_repo_flags=value` → `CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=value` - * `--cm_repos=value` → `CM_DOCKER_EXTRA_CM_REPOS=value` - * `--comments=value` → `CM_DOCKER_RUN_COMMENTS=value` - * `--copy_files=value` → `CM_DOCKER_COPY_FILES=value` - * `--docker_base_image=value` → `CM_DOCKER_IMAGE_BASE=value` - * `--docker_os=value` → `CM_DOCKER_OS=value` - * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` - * `--extra_sys_deps=value` → `CM_DOCKER_EXTRA_SYS_DEPS=value` - * `--fake_docker_deps=value` → `CM_DOCKER_FAKE_DEPS=value` - * `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value` - * `--file_path=value` → `CM_DOCKERFILE_WITH_PATH=value` - * `--gh_token=value` → `CM_GH_TOKEN=value` - * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` - * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` - * `--package_manager_update_cmd=value` → `CM_PACKAGE_MANAGER_UPDATE_CMD=value` - * `--pip_extra_flags=value` → `CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS=value` - * `--post_file=value` → `DOCKER_IMAGE_POST_FILE=value` - * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` - * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` - * `--push_image=value` → `CM_DOCKER_PUSH_IMAGE=value` - * `--real_run=value` → `CM_REAL_RUN=value` - * `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value` - * `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value` - * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` - * `--skip_cm_sys_upgrade=value` → `CM_DOCKER_SKIP_CM_SYS_UPGRADE=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DOCKER_BUILD_SLIM: `no` - * CM_DOCKER_IMAGE_EOL: ` -` - * CM_DOCKER_OS: `ubuntu` - - - -___ -#### Script output -```bash -cmr "build dockerfile [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Docker-automation/index.md b/docs/scripts/Docker-automation/index.md deleted file mode 100644 index ec6c83374..000000000 --- a/docs/scripts/Docker-automation/index.md +++ /dev/null @@ -1,4 +0,0 @@ -* [build-docker-image](build-docker-image/index.md) -* [build-dockerfile](build-dockerfile/index.md) -* [prune-docker](prune-docker/index.md) -* [run-docker-container](run-docker-container/index.md) diff --git a/docs/scripts/Docker-automation/prune-docker/index.md b/docs/scripts/Docker-automation/prune-docker/index.md deleted file mode 100644 index e1025b409..000000000 --- a/docs/scripts/Docker-automation/prune-docker/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# prune-docker -Automatically generated README for this automation recipe: **prune-docker** - -Category: **[Docker automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "prune docker" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=prune,docker - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "prune docker " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'prune,docker' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "prune docker" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/prune-docker/run.bat) -___ -#### Script output -```bash -cmr "prune docker " -j -``` \ No newline at end of file diff --git a/docs/scripts/Docker-automation/run-docker-container/index.md b/docs/scripts/Docker-automation/run-docker-container/index.md deleted file mode 100644 index 68266dfa5..000000000 --- a/docs/scripts/Docker-automation/run-docker-container/index.md +++ /dev/null @@ -1,130 +0,0 @@ -# run-docker-container -Automatically generated README for this automation recipe: **run-docker-container** - -Category: **[Docker automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-docker-container/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/run-docker-container/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run docker container" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,docker,container [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run docker container " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,docker,container' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run docker container" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--all_gpus=value` → `CM_DOCKER_ADD_ALL_GPUS=value` - * `--base=value` → `CM_DOCKER_IMAGE_BASE=value` - * `--cache=value` → `CM_DOCKER_CACHE=value` - * `--cm_repo=value` → `CM_MLOPS_REPO=value` - * `--detached=value` → `CM_DOCKER_DETACHED_MODE=value` - * `--device=value` → `CM_DOCKER_ADD_DEVICE=value` - * `--docker_image_base=value` → `CM_DOCKER_IMAGE_BASE=value` - * `--docker_os=value` → `CM_DOCKER_OS=value` - * `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value` - * `--extra_run_args=value` → `CM_DOCKER_EXTRA_RUN_ARGS=value` - * `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value` - * `--gh_token=value` → `CM_GH_TOKEN=value` - * `--image_name=value` → `CM_DOCKER_IMAGE_NAME=value` - * `--image_repo=value` → `CM_DOCKER_IMAGE_REPO=value` - * `--image_tag=value` → `CM_DOCKER_IMAGE_TAG=value` - * `--image_tag_extra=value` → `CM_DOCKER_IMAGE_TAG_EXTRA=value` - * `--interactive=value` → `CM_DOCKER_INTERACTIVE_MODE=value` - * `--it=value` → `CM_DOCKER_INTERACTIVE=value` - * `--mounts=value` → `CM_DOCKER_VOLUME_MOUNTS=value` - * `--num_gpus=value` → `CM_DOCKER_ADD_NUM_GPUS=value` - * `--pass_user_group=value` → `CM_DOCKER_PASS_USER_GROUP=value` - * `--port_maps=value` → `CM_DOCKER_PORT_MAPS=value` - * `--post_run_cmds=value` → `CM_DOCKER_POST_RUN_COMMANDS=value` - * `--pre_run_cmds=value` → `CM_DOCKER_PRE_RUN_COMMANDS=value` - * `--real_run=value` → `CM_REAL_RUN=value` - * `--recreate=value` → `CM_DOCKER_IMAGE_RECREATE=value` - * `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value` - * `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value` - * `--save_script=value` → `CM_DOCKER_SAVE_SCRIPT=value` - * `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value` - * `--shm_size=value` → `CM_DOCKER_SHM_SIZE=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DOCKER_DETACHED_MODE: `yes` - - - -___ -#### Script output -```bash -cmr "run docker container " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/GUI/gui/index.md b/docs/scripts/GUI/gui/index.md deleted file mode 100644 index 65f72b8c8..000000000 --- a/docs/scripts/GUI/gui/index.md +++ /dev/null @@ -1,174 +0,0 @@ -# gui -Automatically generated README for this automation recipe: **gui** - -Category: **[GUI](..)** - -License: **Apache 2.0** - -Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) - - ---- - -This CM script provides a unified GUI to run CM scripts using [Streamlit library](https://streamlit.io). - -If you want to run it in a cloud (Azure, AWS, GCP), you need to open some port and test that you can reach it from outside. - -By default, streamlit uses port 8501 but you can change it as follows: - -```bash -cm run script "cm gui" --port 80 -``` - -If you have troubles accessing this port, use this simple python module to test if your port is open: -```bash -python3 -m http.server 80 -``` - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "cm gui cm-gui script-gui cm-script-gui streamlit" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=cm,gui,cm-gui,script-gui,cm-script-gui,streamlit[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'cm,gui,cm-gui,script-gui,cm-script-gui,streamlit' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "cm gui cm-gui script-gui cm-script-gui streamlit[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**app**" -
-
-  * `_chatgpt`
-      - ENV variables:
-          - CM_GUI_APP: `chatgpt`
-  * `_graph`
-      - ENV variables:
-          - CM_GUI_APP: `graph`
-  * `_main`
-      - ENV variables:
-          - CM_GUI_APP: `app`
-  * `_playground`
-      - ENV variables:
-          - CM_GUI_APP: `playground`
-
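-
-  For instance, a sketch launching the playground app on a custom port
-  without opening a browser (the port value is arbitrary):
-
-  ```bash
-  cm run script --tags=cm,gui,_playground --port=8502 --no_browser=yes
-  ```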
- -=== "Input Flags" - - - #### Input Flags - - * --**script:** script tags - * --**app:** gui app -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--address=value` → `CM_GUI_ADDRESS=value` - * `--app=value` → `CM_GUI_APP=value` - * `--exp_key_c=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_C=value` - * `--exp_key_s=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_S=value` - * `--exp_key_x=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_X=value` - * `--exp_key_y=value` → `CM_GUI_GRAPH_EXPERIMENT_AXIS_KEY_Y=value` - * `--exp_max_results=value` → `CM_GUI_GRAPH_EXPERIMENT_MAX_RESULTS=value` - * `--exp_name=value` → `CM_GUI_GRAPH_EXPERIMENT_NAME=value` - * `--exp_tags=value` → `CM_GUI_GRAPH_EXPERIMENT_TAGS=value` - * `--exp_title=value` → `CM_GUI_GRAPH_EXPERIMENT_TITLE=value` - * `--exp_uid=value` → `CM_GUI_GRAPH_EXPERIMENT_RESULT_UID=value` - * `--no_browser=value` → `CM_GUI_NO_BROWSER=value` - * `--no_run=value` → `CM_GUI_NO_RUN=value` - * `--port=value` → `CM_GUI_PORT=value` - * `--prefix=value` → `CM_GUI_SCRIPT_PREFIX_LINUX=value` - * `--script=value` → `CM_GUI_SCRIPT_TAGS=value` - * `--title=value` → `CM_GUI_TITLE=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GUI_EXTRA_CMD: `` - * CM_GUI_SCRIPT_PREFIX_LINUX: `gnome-terminal --` - * CM_GUI_APP: `app` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/gui/run.bat) -___ -#### Script output -```bash -cmr "cm gui cm-gui script-gui cm-script-gui streamlit [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/GUI/index.md b/docs/scripts/GUI/index.md deleted file mode 100644 index b30ad2181..000000000 --- a/docs/scripts/GUI/index.md +++ /dev/null @@ -1 +0,0 @@ -* [gui](gui/index.md) diff --git a/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md b/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md deleted file mode 100644 index 4c43e6df2..000000000 --- a/docs/scripts/Legacy-CK-support/get-ck-repo-mlops/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-ck-repo-mlops -Automatically generated README for this automation recipe: **get-ck-repo-mlops** - -Category: **[Legacy CK support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ck-repo mlops ck-repo-mlops" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ck-repo,mlops,ck-repo-mlops - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ck-repo mlops ck-repo-mlops " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ck-repo,mlops,ck-repo-mlops' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ck-repo mlops ck-repo-mlops" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck-repo-mlops/run.bat) -___ -#### Script output -```bash -cmr "get ck-repo mlops ck-repo-mlops " -j -``` \ No newline at end of file diff --git a/docs/scripts/Legacy-CK-support/get-ck/index.md b/docs/scripts/Legacy-CK-support/get-ck/index.md deleted file mode 100644 index 954ae2c2e..000000000 --- a/docs/scripts/Legacy-CK-support/get-ck/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# get-ck -Automatically generated README for this automation recipe: **get-ck** - -Category: **[Legacy CK support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ck ck-framework" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ck,ck-framework - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ck ck-framework " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ck,ck-framework' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ck ck-framework" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ck/run.bat) -___ -#### Script output -```bash -cmr "get ck ck-framework " -j -``` \ No newline at end of file diff --git a/docs/scripts/Legacy-CK-support/index.md b/docs/scripts/Legacy-CK-support/index.md deleted file mode 100644 index 7f099c797..000000000 --- a/docs/scripts/Legacy-CK-support/index.md +++ /dev/null @@ -1,2 +0,0 @@ -* [get-ck](get-ck/index.md) -* [get-ck-repo-mlops](get-ck-repo-mlops/index.md) diff --git a/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md b/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md deleted file mode 100644 index 50a57acce..000000000 --- a/docs/scripts/MLPerf-benchmark-support/add-custom-nvidia-system/index.md +++ /dev/null @@ -1,109 +0,0 @@ -# add-custom-nvidia-system -Automatically generated README for this automation recipe: **add-custom-nvidia-system** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "add custom system nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=add,custom,system,nvidia[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "add custom system nvidia [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'add,custom,system,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "add custom system nvidia[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**code**" -
-
-  * `_ctuning`
-  * `_custom`
-  * `_go`
-  * `_mlcommons`
-  * `_nvidia-only`
-
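-
-  A sketch selecting the MLCommons code variation with a pinned version
-  (assuming the generic `--version` flag of CM scripts applies here):
-
-  ```bash
-  cm run script --tags=add,custom,system,nvidia,_mlcommons --version=r4.0
-  ```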
- -#### Versions -* `r2.1` -* `r3.0` -* `r3.1` -* `r4.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/add-custom-nvidia-system/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "add custom system nvidia [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md b/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md deleted file mode 100644 index 471ff7f8d..000000000 --- a/docs/scripts/MLPerf-benchmark-support/benchmark-any-mlperf-inference-implementation/index.md +++ /dev/null @@ -1,192 +0,0 @@ -# benchmark-any-mlperf-inference-implementation -Automatically generated README for this automation recipe: **benchmark-any-mlperf-inference-implementation** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-any-mlperf-inference-implementation/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**implementation**" -
-
-  * `_deepsparse`
-      - ENV variables:
-          - DIVISION: `open`
-          - IMPLEMENTATION: `deepsparse`
-  * `_intel`
-      - ENV variables:
-          - IMPLEMENTATION: `intel`
-  * `_mil`
-      - ENV variables:
-          - IMPLEMENTATION: `mil`
-  * `_nvidia`
-      - ENV variables:
-          - IMPLEMENTATION: `nvidia-original`
-  * `_qualcomm`
-      - ENV variables:
-          - IMPLEMENTATION: `qualcomm`
-  * `_reference`
-      - ENV variables:
-          - IMPLEMENTATION: `reference`
-  * `_tflite-cpp`
-      - ENV variables:
-          - IMPLEMENTATION: `tflite_cpp`
-
-
-
-  * Group "**power**"
-
-  * **`_performance-only`** (default)
-  * `_power`
-      - ENV variables:
-          - POWER: `True`
-
-
-
-  * Group "**sut**"
-
-
-  * `_aws-dl2q.24xlarge`
-  * `_macbookpro-m1`
-    - ENV variables:
-      - CATEGORY: `edge`
-      - DIVISION: `closed`
-  * `_mini`
-  * `_orin`
-  * `_orin.32g`
-    - ENV variables:
-      - CATEGORY: `edge`
-      - DIVISION: `closed`
-  * `_phoenix`
-    - ENV variables:
-      - CATEGORY: `edge`
-      - DIVISION: `closed`
-  * `_rb6`
-  * `_rpi4`
-  * `_sapphire-rapids.24c`
-    - ENV variables:
-      - CATEGORY: `edge`
-      - DIVISION: `closed`
-
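A hypothetical invocation combining one variation from each group with a few of the input flags documented below (the model, device and backend values are illustrative, not defaults of this script):

```bash
# Sketch: reference implementation, performance-only run on the phoenix SUT
cm run script --tags=benchmark,run,natively,all,inference,any,mlperf,mlperf-implementation,implementation,mlperf-models,_reference,_performance-only,_phoenix \
    --models=resnet50 --devices=cpu --backends=onnxruntime
```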
- - - ##### Default variations - - `_performance-only` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--backends=value` → `BACKENDS=value` - * `--category=value` → `CATEGORY=value` - * `--devices=value` → `DEVICES=value` - * `--division=value` → `DIVISION=value` - * `--extra_args=value` → `EXTRA_ARGS=value` - * `--models=value` → `MODELS=value` - * `--power_server=value` → `POWER_SERVER=value` - * `--power_server_port=value` → `POWER_SERVER_PORT=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * DIVISION: `open` - * CATEGORY: `edge` - - - -#### Native script being run -=== "Linux/macOS" - * [run-template.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-any-mlperf-inference-implementation/run-template.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "benchmark run natively all inference any mlperf mlperf-implementation implementation mlperf-models [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md b/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md deleted file mode 100644 index a6c9522ce..000000000 --- a/docs/scripts/MLPerf-benchmark-support/build-mlperf-inference-server-nvidia/index.md +++ /dev/null @@ -1,164 +0,0 @@ -# build-mlperf-inference-server-nvidia -Automatically generated README for this automation recipe: **build-mlperf-inference-server-nvidia** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "build mlcommons mlperf inference inference-server server nvidia-harness nvidia[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**code**" -
-
-  * **`_ctuning`** (default)
-  * `_custom`
-  * `_go`
-  * `_mlcommons`
-  * `_nvidia-only`
-
-
-
-  * Group "**device**"
-
-
-  * `_cpu`
-    - ENV variables:
-      - CM_MLPERF_DEVICE: `cpu`
-  * **`_cuda`** (default)
-    - ENV variables:
-      - CM_MLPERF_DEVICE: `cuda`
-      - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
-  * `_inferentia`
-    - ENV variables:
-      - CM_MLPERF_DEVICE: `inferentia`
-
-
-
-  * Group "**version**"
-
-
-  * `_r4.0`
-
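As a sketch (assuming the documented variations above), a clean CUDA build against the r4.0 code could be requested as:

```bash
# Sketch: clean CUDA build of the NVIDIA MLPerf inference server
cm run script --tags=build,mlcommons,mlperf,inference,inference-server,server,nvidia-harness,nvidia,_cuda,_r4.0 --clean=yes
```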
- - - ##### Default variations - - `_ctuning,_cuda` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--clean=value` → `CM_MAKE_CLEAN=value` - * `--custom_system=value` → `CM_CUSTOM_SYSTEM_NVIDIA=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MAKE_BUILD_COMMAND: `build` - * CM_MAKE_CLEAN: `no` - * CM_CUSTOM_SYSTEM_NVIDIA: `yes` - - -#### Versions -Default version: `r3.1` - -* `r2.1` -* `r3.0` -* `r3.1` -* `r4.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/build-mlperf-inference-server-nvidia/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "build mlcommons mlperf inference inference-server server nvidia-harness nvidia [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md b/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md deleted file mode 100644 index 566e49acc..000000000 --- a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-submission/index.md +++ /dev/null @@ -1,122 +0,0 @@ -# generate-mlperf-inference-submission -Automatically generated README for this automation recipe: **generate-mlperf-inference-submission** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-submission/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-submission/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'generate,submission,mlperf,mlperf-inference,inference,mlcommons,inference-submission,mlperf-inference-submission,mlcommons-inference-submission' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--analyzer_settings_file=value` → `CM_MLPERF_POWER_ANALYZER_SETTINGS_FILE_PATH=value` - * `--category=value` → `CM_MLPERF_SUBMISSION_CATEGORY=value` - * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` - * `--dashboard=value` → `CM_MLPERF_DASHBOARD=value` - * `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value` - * `--device=value` → `CM_MLPERF_DEVICE=value` - * `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value` - * `--duplicate=value` → `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--hw_notes_extra=value` → `CM_MLPERF_SUT_HW_NOTES_EXTRA=value` - * `--infer_scenario_results=value` → `CM_MLPERF_DUPLICATE_SCENARIO_RESULTS=value` - * `--power_settings_file=value` → `CM_MLPERF_POWER_SETTINGS_FILE_PATH=value` - * `--preprocess=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` - * `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` - * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR_=value` - * `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value` - * `--run_style=value` → `CM_MLPERF_RUN_STYLE=value` - * `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value` - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - * `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` - * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_RUN_MLPERF_ACCURACY: `on` - * CM_MLPERF_RUN_STYLE: `valid` - - - -___ -#### Script output -```bash -cmr "generate submission mlperf mlperf-inference inference mlcommons inference-submission mlperf-inference-submission mlcommons-inference-submission " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md b/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md deleted file mode 100644 index c56840eb3..000000000 --- a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-inference-user-conf/index.md +++ /dev/null @@ -1,122 +0,0 @@ -# generate-mlperf-inference-user-conf -Automatically generated README for this automation recipe: **generate-mlperf-inference-user-conf** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-inference-user-conf/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "generate mlperf inference user-conf inference-user-conf" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=generate,mlperf,inference,user-conf,inference-user-conf [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'generate,mlperf,inference,user-conf,inference-user-conf' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "generate mlperf inference user-conf inference-user-conf" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--num_threads=value` → `CM_NUM_THREADS=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `OUTPUT_BASE_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_PERFORMANCE_SAMPLE_COUNT=value` - * `--power=value` → `CM_MLPERF_POWER=value` - * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` - * `--rerun=value` → `CM_RERUN=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_MLPERF_LOADGEN_MODE: `accuracy` - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_OUTPUT_FOLDER_NAME: `test_results` - * CM_MLPERF_RUN_STYLE: `test` - * CM_TEST_QUERY_COUNT: `10` - * CM_FAST_FACTOR: `5` - * CM_MLPERF_QUANTIZATION: `False` - - - -___ -#### Script output -```bash -cmr "generate mlperf inference user-conf inference-user-conf " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md b/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md deleted file mode 100644 index 74555e500..000000000 --- a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-report/index.md +++ /dev/null @@ -1,107 +0,0 @@ -# generate-mlperf-tiny-report -Automatically generated README for this automation recipe: **generate-mlperf-tiny-report** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "generate mlperf tiny mlperf-tiny report" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=generate,mlperf,tiny,mlperf-tiny,report [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "generate mlperf tiny mlperf-tiny report " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'generate,mlperf,tiny,mlperf-tiny,report' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "generate mlperf tiny mlperf-tiny report" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--repo_tags=value` → `CM_IMPORT_TINYMLPERF_REPO_TAGS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_IMPORT_TINYMLPERF_REPO_TAGS: `1.1-private` - - - -#### Native script being run -=== "Linux/macOS" - * [run_submission_checker.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/run_submission_checker.sh) -=== "Windows" - - * [run_submission_checker.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-report/run_submission_checker.bat) -___ -#### Script output -```bash -cmr "generate mlperf tiny mlperf-tiny report " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md b/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md deleted file mode 100644 index 3f583de51..000000000 --- a/docs/scripts/MLPerf-benchmark-support/generate-mlperf-tiny-submission/index.md +++ /dev/null @@ -1,81 +0,0 @@ -# generate-mlperf-tiny-submission -Automatically generated README for this automation recipe: **generate-mlperf-tiny-submission** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-submission/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-mlperf-tiny-submission/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'generate,submission,mlperf,mlperf-tiny,tiny,mlcommons,tiny-submission,mlperf-tiny-submission,mlcommons-tiny-submission' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission" - ``` -___ - - -___ -#### Script output -```bash -cmr "generate submission mlperf mlperf-tiny tiny mlcommons tiny-submission mlperf-tiny-submission mlcommons-tiny-submission " -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md b/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md deleted file mode 100644 index 05f7576e2..000000000 --- a/docs/scripts/MLPerf-benchmark-support/generate-nvidia-engine/index.md +++ /dev/null @@ -1,165 +0,0 @@ -# generate-nvidia-engine -Automatically generated README for this automation recipe: **generate-nvidia-engine** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - - ---- - -This CM script is in draft stage - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-nvidia-engine/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "generate engine mlperf inference nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=generate,engine,mlperf,inference,nvidia[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "generate engine mlperf inference nvidia [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'generate,engine,mlperf,inference,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "generate engine mlperf inference nvidia[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_batch_size.#`
-    - ENV variables:
-      - CM_MODEL_BATCH_SIZE: `None`
-  * `_copy_streams.#`
-    - ENV variables:
-      - CM_GPU_COPY_STREAMS: `None`
-  * `_cuda`
-    - ENV variables:
-      - CM_MLPERF_DEVICE: `gpu`
-      - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
-
-
-
-  * Group "**device**"
-
-
-  * **`_cpu`** (default)
-    - ENV variables:
-      - CM_MLPERF_DEVICE: `cpu`
-
-
-
-  * Group "**model**"
-
-
-  * **`_resnet50`** (default)
-    - ENV variables:
-      - CM_MODEL: `resnet50`
-  * `_retinanet`
-    - ENV variables:
-      - CM_MODEL: `retinanet`
-
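For example, a hypothetical engine build for retinanet with a batch size of 8 might look like this (the output path is arbitrary):

```bash
# Sketch: generate an engine for retinanet, batch size 8
cm run script --tags=generate,engine,mlperf,inference,nvidia,_retinanet,_batch_size.8 --output_dir=/tmp/nvidia_engines
```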
- - - ##### Default variations - - `_cpu,_resnet50` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - * CM_LOADGEN_SCENARIO: `Offline` - * CM_GPU_COPY_STREAMS: `1` - * CM_TENSORRT_WORKSPACE_SIZE: `4194304` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/generate-nvidia-engine/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "generate engine mlperf inference nvidia [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md deleted file mode 100644 index 4267c8146..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-intel-scratch-space/index.md +++ /dev/null @@ -1,117 +0,0 @@ -# get-mlperf-inference-intel-scratch-space -Automatically generated README for this automation recipe: **get-mlperf-inference-intel-scratch-space** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf inference intel scratch space" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,inference,intel,scratch,space[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf inference intel scratch space [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,inference,intel,scratch,space' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf inference intel scratch space[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**version**" -
-
-  * `_version.#`
-    - ENV variables:
-      - CM_INTEL_SCRATCH_SPACE_VERSION: `#`
-  * **`_version.4_0`** (default)
-    - ENV variables:
-      - CM_INTEL_SCRATCH_SPACE_VERSION: `4_0`
-
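A hypothetical run pinning the default version and supplying a custom path via the flag mapped below (the path is illustrative):

```bash
# Sketch: version 4_0 scratch space at an arbitrary location
cm run script --tags=get,mlperf,inference,intel,scratch,space,_version.4_0 --scratch_path=/data/intel_scratch
```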
- - - ##### Default variations - - `_version.4_0` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--scratch_path=value` → `MLPERF_INTEL_SCRATCH_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-intel-scratch-space/run.bat) -___ -#### Script output -```bash -cmr "get mlperf inference intel scratch space [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md deleted file mode 100644 index 85084ac54..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-loadgen/index.md +++ /dev/null @@ -1,144 +0,0 @@ -# get-mlperf-inference-loadgen -Automatically generated README for this automation recipe: **get-mlperf-inference-loadgen** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get loadgen inference inference-loadgen mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,loadgen,inference,inference-loadgen,mlperf,mlcommons[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,loadgen,inference,inference-loadgen,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get loadgen inference inference-loadgen mlperf mlcommons[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_copy`
-  * `_custom-python`
-    - ENV variables:
-      - CM_TMP_USE_CUSTOM_PYTHON: `on`
-  * `_download`
-    - ENV variables:
-      - CM_DOWNLOAD_CHECKSUM: `af3f9525965b2c1acc348fb882a5bfd1`
-      - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES`
-      - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0`
-      - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v3.1`
-      - CM_VERIFY_SSL: `False`
-  * `_download_v3.1`
-    - ENV variables:
-      - CM_DOWNLOAD_CHECKSUM: `af3f9525965b2c1acc348fb882a5bfd1`
-      - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES`
-      - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/36dgoiur26i2tvwgsaatf/loadgen.zip?rlkey=ab68i7uza9anvaw0hk1xvf0qk&dl=0`
-      - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v3.1`
-      - CM_VERIFY_SSL: `False`
-  * `_download_v4.0`
-    - ENV variables:
-      - CM_DOWNLOAD_CHECKSUM: `b4d97525d9ad0539a64667f2a3ca20c5`
-      - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD: `YES`
-      - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL: `https://www.dropbox.com/scl/fi/gk5e9kziju5t56umxyzyx/loadgen.zip?rlkey=vsie4xnzml1inpjplm5cg7t54&dl=0`
-      - CM_MLPERF_INFERENCE_LOADGEN_VERSION: `v4.0`
-      - CM_VERIFY_SSL: `False`
-
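For instance, the prebuilt v4.0 loadgen can be fetched by selecting the corresponding variation (example invocation, not from the original README):

```bash
# Sketch: download the prebuilt v4.0 loadgen instead of building from source
cmr "get loadgen inference inference-loadgen mlperf mlcommons _download_v4.0"
```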
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_SHARED_BUILD: `no` - - -#### Versions -Default version: `master` - -* `custom` -* `main` -* `master` -* `pybind_fix` -* `r2.1` -* `r3.0` -* `r3.1` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-loadgen/run.bat) -___ -#### Script output -```bash -cmr "get loadgen inference inference-loadgen mlperf mlcommons [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md deleted file mode 100644 index ca4cb291c..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-common-code/index.md +++ /dev/null @@ -1,105 +0,0 @@ -# get-mlperf-inference-nvidia-common-code -Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-common-code** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-common-code/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-common-code/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get nvidia mlperf inference common-code" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,nvidia,mlperf,inference,common-code[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get nvidia mlperf inference common-code [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,nvidia,mlperf,inference,common-code' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get nvidia mlperf inference common-code[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**repo-owner**" -
-
-  * `_ctuning`
-  * `_custom`
-  * `_go`
-  * `_mlcommons`
-  * `_nvidia-only`
-
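For illustration, pulling the common code from the mlcommons repository owner would be:

```bash
# Sketch: select the mlcommons repo-owner variation
cmr "get nvidia mlperf inference common-code _mlcommons" -j
```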
- -#### Versions -Default version: `r3.1` - -* `r2.1` -* `r3.0` -* `r3.1` -* `r4.0` - -___ -#### Script output -```bash -cmr "get nvidia mlperf inference common-code [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md deleted file mode 100644 index 6f7fd5229..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-nvidia-scratch-space/index.md +++ /dev/null @@ -1,118 +0,0 @@ -# get-mlperf-inference-nvidia-scratch-space -Automatically generated README for this automation recipe: **get-mlperf-inference-nvidia-scratch-space** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf inference nvidia scratch space" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,inference,nvidia,scratch,space[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,inference,nvidia,scratch,space' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf inference nvidia scratch space[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**version**" -
-
-  * `_version.#`
-    - ENV variables:
-      - CM_NVIDIA_SCRATCH_SPACE_VERSION: `#`
-  * **`_version.4_0`** (default)
-    - ENV variables:
-      - CM_NVIDIA_SCRATCH_SPACE_VERSION: `4_0`
-
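A hypothetical run pinning the default version and pointing the scratch space at a user-chosen path via the flag mapped below:

```bash
# Sketch: version 4_0 NVIDIA scratch space at an arbitrary location
cm run script --tags=get,mlperf,inference,nvidia,scratch,space,_version.4_0 --scratch_path=/data/nvidia_scratch
```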
- - - ##### Default variations - - `_version.4_0` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--scratch_path=value` → `CM_NVIDIA_MLPERF_SCRATCH_PATH=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-nvidia-scratch-space/run.bat) -___ -#### Script output -```bash -cmr "get mlperf inference nvidia scratch space [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md deleted file mode 100644 index b67ffcfda..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results-dir/index.md +++ /dev/null @@ -1,111 +0,0 @@ -# get-mlperf-inference-results-dir -Automatically generated README for this automation recipe: **get-mlperf-inference-results-dir** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results-dir/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf inference results dir directory" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,inference,results,dir,directory[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf inference results dir directory [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,inference,results,dir,directory' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf inference results dir directory[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**version**" -
-
-  * `_version.#`
-    - ENV variables:
-      - CM_MLPERF_INFERENCE_RESULTS_VERSION: `#`
-  * **`_version.4_0`** (default)
-    - ENV variables:
-      - CM_MLPERF_INFERENCE_RESULTS_VERSION: `4_0`
-
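For example (the path is illustrative), an existing results directory can be registered as:

```bash
# Sketch: register a results directory for version 4_0
cm run script --tags=get,mlperf,inference,results,dir,directory,_version.4_0 --results_dir=$HOME/mlperf_results
```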
- - - ##### Default variations - - `_version.4_0` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value` - - - - -___ -#### Script output -```bash -cmr "get mlperf inference results dir directory [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md deleted file mode 100644 index 9297150e2..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-results/index.md +++ /dev/null @@ -1,132 +0,0 @@ -# get-mlperf-inference-results -Automatically generated README for this automation recipe: **get-mlperf-inference-results** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-results/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get results inference inference-results mlcommons mlperf" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,results,inference,inference-results,mlcommons,mlperf[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get results inference inference-results mlcommons mlperf [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,results,inference,inference-results,mlcommons,mlperf' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get results inference inference-results mlcommons mlperf[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**source-repo**" -
-
-  * `_ctuning`
-    - ENV variables:
-      - GITHUB_REPO_OWNER: `ctuning`
-  * `_custom`
-    - ENV variables:
-      - GITHUB_REPO_OWNER: `arjunsuresh`
-  * `_go`
-    - ENV variables:
-      - GITHUB_REPO_OWNER: `GATEOverflow`
-  * **`_mlcommons`** (default)
-    - ENV variables:
-      - GITHUB_REPO_OWNER: `mlcommons`
-  * `_nvidia-only`
-    - ENV variables:
-      - GITHUB_REPO_OWNER: `GATEOverflow`
-      - NVIDIA_ONLY: `yes`
-
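As an illustrative example, the GATEOverflow mirror can be selected instead of the default owner:

```bash
# Sketch: fetch results from the GATEOverflow repository owner
cmr "get results inference inference-results mlcommons mlperf _go"
```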
- - - ##### Default variations - - `_mlcommons` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_CHECKOUT: `master` - * CM_GIT_DEPTH: `--depth 1` - * CM_GIT_PATCH: `no` - - -#### Versions -Default version: `v3.1` - -* `v2.1` -* `v3.0` -* `v3.1` -* `v4.0` - -___ -#### Script output -```bash -cmr "get results inference inference-results mlcommons mlperf [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md deleted file mode 100644 index 44b4f4b4f..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-src/index.md +++ /dev/null @@ -1,192 +0,0 @@ -# get-mlperf-inference-src -Automatically generated README for this automation recipe: **get-mlperf-inference-src** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src source inference inference-src inference-source mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,source,inference,inference-src,inference-source,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src source inference inference-src inference-source mlperf mlcommons[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-
-  * `_3d-unet`
-    - ENV variables:
-      - CM_SUBMODULE_3D_UNET: `yes`
-  * `_deeplearningexamples`
-    - ENV variables:
-      - CM_SUBMODULE_DEEPLEARNINGEXAMPLES: `yes`
-  * `_deepsparse`
-    - ENV variables:
-      - CM_GIT_CHECKOUT: `deepsparse`
-      - CM_GIT_URL: `https://github.com/neuralmagic/inference`
-      - CM_MLPERF_LAST_RELEASE: `v4.0`
-  * `_gn`
-    - ENV variables:
-      - CM_SUBMODULE_GN: `yes`
-  * `_no-recurse-submodules`
-    - ENV variables:
-      - CM_GIT_RECURSE_SUBMODULES: ``
-  * `_nvidia-pycocotools`
-    - ENV variables:
-      - CM_GIT_PATCH_FILENAME: `coco.patch`
-  * `_octoml`
-    - ENV variables:
-      - CM_GIT_URL: `https://github.com/octoml/inference`
-  * `_openimages-nvidia-pycocotools`
-    - ENV variables:
-      - CM_GIT_PATCH_FILENAME: `openimages-pycocotools.patch`
-  * `_patch`
-    - ENV variables:
-      - CM_GIT_PATCH: `yes`
-  * `_pybind`
-    - ENV variables:
-      - CM_SUBMODULE_PYBIND: `yes`
-  * `_recurse-submodules`
-    - ENV variables:
-      - CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules`
-  * `_repo.#`
-    - ENV variables:
-      - CM_GIT_URL: `#`
-  * `_submodules.#`
-    - ENV variables:
-      - CM_GIT_SUBMODULES: `#`
-
-
-
-  * Group "**checkout**"
-
-
-  * `_branch.#`
-    - ENV variables:
-      - CM_GIT_CHECKOUT: `#`
-  * `_sha.#`
-    - ENV variables:
-      - CM_GIT_SHA: `#`
-
-
-
-  * Group "**git-history**"
-
-
-  * `_full-history`
-    - ENV variables:
-      - CM_GIT_DEPTH: ``
-  * **`_short-history`** (default)
-    - ENV variables:
-      - CM_GIT_DEPTH: `--depth 10`
-
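Combining variations from the groups above, a hypothetical full-history checkout with submodules might look like this (the branch name is illustrative):

```bash
# Sketch: full-history checkout of a chosen branch, recursing submodules
cm run script --tags=get,src,source,inference,inference-src,inference-source,mlperf,mlcommons,_branch.master,_full-history,_recurse-submodules
```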
- - - ##### Default variations - - `_short-history` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_CHECKOUT_FOLDER: `inference` - * CM_GIT_DEPTH: `--depth 4` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: `` - * CM_GIT_URL: `https://github.com/mlcommons/inference.git` - - -#### Versions -Default version: `master` - -* `custom` -* `deepsparse` -* `main` -* `master` -* `pybind_fix` -* `r2.1` -* `r3.0` -* `r3.1` -* `tvm` - -___ -#### Script output -```bash -cmr "get src source inference inference-src inference-source mlperf mlcommons [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md deleted file mode 100644 index d6375c0dc..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-submission-dir/index.md +++ /dev/null @@ -1,111 +0,0 @@ -# get-mlperf-inference-submission-dir -Automatically generated README for this automation recipe: **get-mlperf-inference-submission-dir** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-submission-dir/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf inference submission dir directory" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,inference,submission,dir,directory[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf inference submission dir directory [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,inference,submission,dir,directory' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf inference submission dir directory[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**version**" -
-
-  * `_version.#`
-    - ENV variables:
-      - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: `#`
-  * **`_version.4_0`** (default)
-    - ENV variables:
-      - CM_MLPERF_INFERENCE_SUBMISSION_VERSION: `4_0`
-
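For example (the path is arbitrary), a custom submission directory can be registered as:

```bash
# Sketch: point the automation at a custom submission directory
cm run script --tags=get,mlperf,inference,submission,dir,directory,_version.4_0 --submission_dir=$HOME/mlperf_submission
```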
- - - ##### Default variations - - `_version.4_0` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - - - - -___ -#### Script output -```bash -cmr "get mlperf inference submission dir directory [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md deleted file mode 100644 index 3aa4926b3..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-configs/index.md +++ /dev/null @@ -1,103 +0,0 @@ -# get-mlperf-inference-sut-configs -Automatically generated README for this automation recipe: **get-mlperf-inference-sut-configs** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-configs/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-configs/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf inference sut configs sut-configs" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,inference,sut,configs,sut-configs [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf inference sut configs sut-configs " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,inference,sut,configs,sut-configs' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf inference sut configs sut-configs" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--configs_git_url=value` → `CM_GIT_URL=value` - * `--repo_path=value` → `CM_SUT_CONFIGS_PATH=value` - * `--run_config=value` → `CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_SUT_CONFIGS_PATH: `` - * CM_GIT_URL: `` - - - -___ -#### Script output -```bash -cmr "get mlperf inference sut configs sut-configs " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md deleted file mode 100644 index 7082c8a80..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-inference-sut-description/index.md +++ /dev/null @@ -1,100 +0,0 @@ -# get-mlperf-inference-sut-description -Automatically generated README for this automation recipe: **get-mlperf-inference-sut-description** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-inference-sut-description/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf sut description system-under-test system-description" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,sut,description,system-under-test,system-description [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf sut description system-under-test system-description " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,sut,description,system-under-test,system-description' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf sut description system-under-test system-description" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--name=value` → `CM_HW_NAME=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_SUT_DESC_CACHE: `no` - - - -___ -#### Script output -```bash -cmr "get mlperf sut description system-under-test system-description " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md deleted file mode 100644 index ce64cb510..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-logging/index.md +++ /dev/null @@ -1,81 +0,0 @@ -# get-mlperf-logging -Automatically generated README for this automation recipe: **get-mlperf-logging** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-logging/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-logging/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get mlperf logging mlperf-logging" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,mlperf,logging,mlperf-logging - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get mlperf logging mlperf-logging " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,mlperf,logging,mlperf-logging' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get mlperf logging mlperf-logging" - ``` -___ - - -___ -#### Script output -```bash -cmr "get mlperf logging mlperf-logging " -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md deleted file mode 100644 index d3cfef35e..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-power-dev/index.md +++ /dev/null @@ -1,134 +0,0 @@ -# get-mlperf-power-dev -Automatically generated README for this automation recipe: **get-mlperf-power-dev** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-power-dev/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src source power power-dev mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src source power power-dev mlperf mlcommons [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,source,power,power-dev,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src source power power-dev mlperf mlcommons[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**checkout**" -
-    * `_branch.#`
-      - ENV variables:
-        - CM_GIT_CHECKOUT: `#`
-    * `_sha.#`
-      - ENV variables:
-        - CM_GIT_SHA: `#`
-    * `_tag.#`
-      - ENV variables:
-        - CM_GIT_CHECKOUT_TAG: `#`
-
-  * Group "**repo**"
-    * **`_mlcommons`** (default)
-      - ENV variables:
-        - CM_GIT_URL: `https://github.com/mlcommons/power-dev.git`
-    * `_octoml`
-      - ENV variables:
-        - CM_GIT_URL: `https://github.com/octoml/power-dev.git`
-    * `_repo.#`
-      - ENV variables:
-        - CM_GIT_URL: `#`
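As a sketch of how these groups compose (the branch name `master` is only an illustration of the `_branch.#` wildcard; substitute your own), variations are appended directly to the tag list:

```bash
cm run script --tags=get,src,source,power,power-dev,mlperf,mlcommons,_branch.master,_mlcommons
```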
- - - ##### Default variations - - `_mlcommons` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_DEPTH: `--depth 1` - * CM_GIT_PATCH: `no` - * CM_GIT_CHECKOUT_FOLDER: `power-dev` - - - -___ -#### Script output -```bash -cmr "get src source power power-dev mlperf mlcommons [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md deleted file mode 100644 index dab580fbd..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-eembc-energy-runner-src/index.md +++ /dev/null @@ -1,99 +0,0 @@ -# get-mlperf-tiny-eembc-energy-runner-src -Automatically generated README for this automation recipe: **get-mlperf-tiny-eembc-energy-runner-src** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,source,eembc,energyrunner,energy-runner,eembc-energy-runner,tinymlperf-energy-runner' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner" - ``` -___ - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_GIT_CHECKOUT: `main` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: `` - * CM_GIT_URL: `https://github.com/eembc/energyrunner` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-eembc-energy-runner-src/run.bat) -___ -#### Script output -```bash -cmr "get src source eembc energyrunner energy-runner eembc-energy-runner tinymlperf-energy-runner " -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md deleted file mode 100644 index 9c76d468e..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-tiny-src/index.md +++ /dev/null @@ -1,99 +0,0 @@ -# get-mlperf-tiny-src -Automatically generated README for this automation recipe: **get-mlperf-tiny-src** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,source,tiny,tiny-src,tiny-source,tinymlperf,tinymlperf-src,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons" - ``` -___ - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_GIT_CHECKOUT: `master` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: `` - * CM_GIT_URL: `https://github.com/mlcommons/tiny.git` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-tiny-src/run.bat) -___ -#### Script output -```bash -cmr "get src source tiny tiny-src tiny-source tinymlperf tinymlperf-src mlperf mlcommons " -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md deleted file mode 100644 index 3ee1a15ac..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-nvidia-code/index.md +++ /dev/null @@ -1,112 +0,0 @@ -# get-mlperf-training-nvidia-code -Automatically generated README for this automation recipe: **get-mlperf-training-nvidia-code** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-nvidia-code/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get nvidia mlperf training code training-code" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,nvidia,mlperf,training,code,training-code[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get nvidia mlperf training code training-code [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,nvidia,mlperf,training,code,training-code' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get nvidia mlperf training code training-code[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**repo-owner**" -
-    * `_ctuning`
-      - ENV variables:
-        - CM_TMP_TRAINING_SRC: `ctuning`
-    * `_custom`
-    * **`_mlcommons`** (default)
-      - ENV variables:
-        - CM_TMP_TRAINING_SRC: `mlcommons`
-    * `_nvidia-only`
-      - ENV variables:
-        - CM_TMP_TRAINING_SRC: `GATEOverflow`
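For illustration, selecting a non-default repo owner from this group follows the usual `[,variations]` pattern:

```bash
cm run script --tags=get,nvidia,mlperf,training,code,training-code,_nvidia-only
```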
- - - ##### Default variations - - `_mlcommons` -#### Versions -Default version: `r3.0` - -* `r2.1` -* `r3.0` -* `r3.1` - -___ -#### Script output -```bash -cmr "get nvidia mlperf training code training-code [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md b/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md deleted file mode 100644 index ac0c7803e..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-mlperf-training-src/index.md +++ /dev/null @@ -1,181 +0,0 @@ -# get-mlperf-training-src -Automatically generated README for this automation recipe: **get-mlperf-training-src** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-src/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-mlperf-training-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src source training training-src training-source mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src source training training-src training-source mlperf mlcommons [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,source,training,training-src,training-source,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src source training training-src training-source mlperf mlcommons[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_no-recurse-submodules`
-      - ENV variables:
-        - CM_GIT_RECURSE_SUBMODULES: ``
-    * `_nvidia-retinanet`
-      - ENV variables:
-        - CM_GIT_PATCH_FILENAMES: `nvidia-retinanet.patch,cpu_load.patch`
-    * `_patch`
-      - ENV variables:
-        - CM_GIT_PATCH: `yes`
-
-  * Group "**checkout**"
-    * `_branch.#`
-      - ENV variables:
-        - CM_GIT_CHECKOUT: `#`
-    * `_sha.#`
-      - ENV variables:
-        - CM_GIT_SHA: `#`
-    * `_tag.#`
-      - ENV variables:
-        - CM_GIT_CHECKOUT_TAG: `#`
-
-  * Group "**git-history**"
-    * `_full-history`
-      - ENV variables:
-        - CM_GIT_DEPTH: ``
-    * **`_short-history`** (default)
-      - ENV variables:
-        - CM_GIT_DEPTH: `--depth 5`
-
-  * Group "**repo**"
-    * `_repo.#`
-      - ENV variables:
-        - CM_GIT_URL: `#`
-
-  * Group "**src**"
-    * **`_cknowledge`** (default)
-      - ENV variables:
-        - CM_GIT_URL: `https://github.com/cknowledge/training.git`
-    * `_mlcommons`
-      - ENV variables:
-        - CM_GIT_URL: `https://github.com/mlcommons/training.git`
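A sketch combining one variation from each of the groups above (the defaults would otherwise apply):

```bash
cm run script --tags=get,src,source,training,training-src,training-source,mlperf,mlcommons,_mlcommons,_full-history
```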
- - - ##### Default variations - - `_cknowledge,_short-history` -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_GIT_CHECKOUT: `master` - * CM_GIT_DEPTH: `--depth 4` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: ` --recurse-submodules` - * CM_GIT_CHECKOUT_FOLDER: `training` - - -#### Versions -Default version: `master` - -* `custom` -* `master` - -___ -#### Script output -```bash -cmr "get src source training training-src training-source mlperf mlcommons [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md b/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md deleted file mode 100644 index 8746bdac3..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-nvidia-mitten/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# get-nvidia-mitten -Automatically generated README for this automation recipe: **get-nvidia-mitten** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get nvidia mitten nvidia-mitten" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,nvidia,mitten,nvidia-mitten - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get nvidia mitten nvidia-mitten " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,nvidia,mitten,nvidia-mitten' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get nvidia mitten nvidia-mitten" - ``` -___ - -#### Versions -Default version: `master` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-nvidia-mitten/run.bat) -___ -#### Script output -```bash -cmr "get nvidia mitten nvidia-mitten " -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md b/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md deleted file mode 100644 index f2c9e85c7..000000000 --- a/docs/scripts/MLPerf-benchmark-support/get-spec-ptd/index.md +++ /dev/null @@ -1,121 +0,0 @@ -# get-spec-ptd -Automatically generated README for this automation recipe: **get-spec-ptd** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,spec,ptd,ptdaemon,power,daemon,power-daemon,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons" [--input_flags] - ``` -___ - -=== "Input Flags" - - - #### Input Flags - - * --**input:** Path to SPEC PTDaemon (Optional) -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `CM_INPUT=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_GIT_CHECKOUT: `main` - * CM_GIT_DEPTH: `--depth 1` - * CM_GIT_PATCH: `no` - * CM_GIT_RECURSE_SUBMODULES: ` ` - * CM_GIT_URL: `https://github.com/mlcommons/power.git` - - -#### Versions -Default version: `main` - -* `custom` -* `main` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-spec-ptd/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get spec ptd ptdaemon power daemon power-daemon mlperf mlcommons " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md b/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md deleted file mode 100644 index 8beb80672..000000000 --- a/docs/scripts/MLPerf-benchmark-support/import-mlperf-inference-to-experiment/index.md +++ /dev/null @@ -1,107 +0,0 @@ -# import-mlperf-inference-to-experiment -Automatically generated README for this automation recipe: **import-mlperf-inference-to-experiment** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-inference-to-experiment/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-inference-to-experiment/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "import mlperf inference mlperf-inference experiment 2experiment to-experiment[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_skip_checker`
-      - ENV variables:
-        - CM_SKIP_SUBMISSION_CHECKER: `True`
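For example, combined with the flags mapped just below (the submitter name here is a placeholder):

```bash
cm run script --tags=import,mlperf,inference,mlperf-inference,experiment,2experiment,to-experiment,_skip_checker --submitter=MySubmitter
```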
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - * `--target_repo=value` → `CM_IMPORT_MLPERF_INFERENCE_TARGET_REPO=value` - - - - -___ -#### Script output -```bash -cmr "import mlperf inference mlperf-inference experiment 2experiment to-experiment [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md b/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md deleted file mode 100644 index ee0aa4edd..000000000 --- a/docs/scripts/MLPerf-benchmark-support/import-mlperf-tiny-to-experiment/index.md +++ /dev/null @@ -1,91 +0,0 @@ -# import-mlperf-tiny-to-experiment -Automatically generated README for this automation recipe: **import-mlperf-tiny-to-experiment** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-tiny-to-experiment/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-tiny-to-experiment/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'import,mlperf,tiny,mlperf-tiny,experiment,2experiment,to-experiment' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--target_repo=value` → `CM_IMPORT_TINYMLPERF_TARGET_REPO=value` - - - - -___ -#### Script output -```bash -cmr "import mlperf tiny mlperf-tiny experiment 2experiment to-experiment " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md b/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md deleted file mode 100644 index edda35499..000000000 --- a/docs/scripts/MLPerf-benchmark-support/import-mlperf-training-to-experiment/index.md +++ /dev/null @@ -1,97 +0,0 @@ -# import-mlperf-training-to-experiment -Automatically generated README for this automation recipe: **import-mlperf-training-to-experiment** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "import mlperf training mlperf-training experiment 2experiment to-experiment" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'import,mlperf,training,mlperf-training,experiment,2experiment,to-experiment' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "import mlperf training mlperf-training experiment 2experiment to-experiment" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--target_repo=value` → `CM_IMPORT_MLPERF_TRAINING_TARGET_REPO=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run_mlperf_logger.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/import-mlperf-training-to-experiment/run_mlperf_logger.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "import mlperf training mlperf-training experiment 2experiment to-experiment " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/index.md b/docs/scripts/MLPerf-benchmark-support/index.md deleted file mode 100644 index b4011e7ce..000000000 --- a/docs/scripts/MLPerf-benchmark-support/index.md +++ /dev/null @@ -1,41 +0,0 @@ -* [add-custom-nvidia-system](add-custom-nvidia-system/index.md) -* [benchmark-any-mlperf-inference-implementation](benchmark-any-mlperf-inference-implementation/index.md) -* [build-mlperf-inference-server-nvidia](build-mlperf-inference-server-nvidia/index.md) -* [generate-mlperf-inference-submission](generate-mlperf-inference-submission/index.md) -* [generate-mlperf-inference-user-conf](generate-mlperf-inference-user-conf/index.md) -* [generate-mlperf-tiny-report](generate-mlperf-tiny-report/index.md) -* [generate-mlperf-tiny-submission](generate-mlperf-tiny-submission/index.md) -* [generate-nvidia-engine](generate-nvidia-engine/index.md) -* [get-mlperf-inference-intel-scratch-space](get-mlperf-inference-intel-scratch-space/index.md) -* [get-mlperf-inference-loadgen](get-mlperf-inference-loadgen/index.md) -* [get-mlperf-inference-nvidia-common-code](get-mlperf-inference-nvidia-common-code/index.md) -* [get-mlperf-inference-nvidia-scratch-space](get-mlperf-inference-nvidia-scratch-space/index.md) -* [get-mlperf-inference-results](get-mlperf-inference-results/index.md) -* [get-mlperf-inference-results-dir](get-mlperf-inference-results-dir/index.md) -* [get-mlperf-inference-src](get-mlperf-inference-src/index.md) -* [get-mlperf-inference-submission-dir](get-mlperf-inference-submission-dir/index.md) -* [get-mlperf-inference-sut-configs](get-mlperf-inference-sut-configs/index.md) -* [get-mlperf-inference-sut-description](get-mlperf-inference-sut-description/index.md) -* [get-mlperf-logging](get-mlperf-logging/index.md) -* [get-mlperf-power-dev](get-mlperf-power-dev/index.md) -* [get-mlperf-tiny-eembc-energy-runner-src](get-mlperf-tiny-eembc-energy-runner-src/index.md) -* [get-mlperf-tiny-src](get-mlperf-tiny-src/index.md) -* [get-mlperf-training-nvidia-code](get-mlperf-training-nvidia-code/index.md) -* [get-mlperf-training-src](get-mlperf-training-src/index.md) -* [get-nvidia-mitten](get-nvidia-mitten/index.md) -* [get-spec-ptd](get-spec-ptd/index.md) -* [import-mlperf-inference-to-experiment](import-mlperf-inference-to-experiment/index.md) -* [import-mlperf-tiny-to-experiment](import-mlperf-tiny-to-experiment/index.md) -* [import-mlperf-training-to-experiment](import-mlperf-training-to-experiment/index.md) -* [install-mlperf-logging-from-src](install-mlperf-logging-from-src/index.md) -* [prepare-training-data-bert](prepare-training-data-bert/index.md) -* [prepare-training-data-resnet](prepare-training-data-resnet/index.md) -* 
[preprocess-mlperf-inference-submission](preprocess-mlperf-inference-submission/index.md) -* [process-mlperf-accuracy](process-mlperf-accuracy/index.md) -* [push-mlperf-inference-results-to-github](push-mlperf-inference-results-to-github/index.md) -* [run-mlperf-inference-mobilenet-models](run-mlperf-inference-mobilenet-models/index.md) -* [run-mlperf-inference-submission-checker](run-mlperf-inference-submission-checker/index.md) -* [run-mlperf-power-client](run-mlperf-power-client/index.md) -* [run-mlperf-power-server](run-mlperf-power-server/index.md) -* [run-mlperf-training-submission-checker](run-mlperf-training-submission-checker/index.md) -* [truncate-mlperf-inference-accuracy-log](truncate-mlperf-inference-accuracy-log/index.md) diff --git a/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md b/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md deleted file mode 100644 index 5b673d37d..000000000 --- a/docs/scripts/MLPerf-benchmark-support/install-mlperf-logging-from-src/index.md +++ /dev/null @@ -1,89 +0,0 @@ -# install-mlperf-logging-from-src -Automatically generated README for this automation recipe: **install-mlperf-logging-from-src** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/install-mlperf-logging-from-src/_cm.yaml)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install mlperf logging from.src" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,mlperf,logging,from.src - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install mlperf logging from.src " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,mlperf,logging,from.src' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install mlperf logging from.src" - ``` -___ - -#### Versions -* `master` -* `v3.1` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-mlperf-logging-from-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install mlperf logging from.src " -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md b/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md deleted file mode 100644 index 9b3b8d1bc..000000000 --- a/docs/scripts/MLPerf-benchmark-support/prepare-training-data-bert/index.md +++ /dev/null @@ -1,120 +0,0 @@ -# prepare-training-data-bert -Automatically generated README for this automation recipe: **prepare-training-data-bert** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "prepare mlperf training data input bert" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=prepare,mlperf,training,data,input,bert[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "prepare mlperf training data input bert [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'prepare,mlperf,training,data,input,bert' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "prepare mlperf training data input bert[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**implementation**" -
-    * **`_nvidia`** (default)
-      - ENV variables:
-        - CM_TMP_VARIATION: `nvidia`
-    * `_reference`
-      - ENV variables:
-        - CM_TMP_VARIATION: `reference`
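A minimal sketch using the non-default implementation together with the `--data_dir` flag mapped just below (the path is hypothetical):

```bash
cm run script --tags=prepare,mlperf,training,data,input,bert,_reference --data_dir=$HOME/bert_training_data
```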
- - - ##### Default variations - - `_nvidia` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--clean=value` → `CM_MLPERF_TRAINING_CLEAN_TFRECORDS=value` - * `--data_dir=value` → `CM_DATA_DIR=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run-nvidia.sh) - * [run-reference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run-reference.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-bert/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "prepare mlperf training data input bert [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md b/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md deleted file mode 100644 index 1f4f11347..000000000 --- a/docs/scripts/MLPerf-benchmark-support/prepare-training-data-resnet/index.md +++ /dev/null @@ -1,129 +0,0 @@ -# prepare-training-data-resnet -Automatically generated README for this automation recipe: **prepare-training-data-resnet** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "prepare mlperf training data input resnet" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=prepare,mlperf,training,data,input,resnet[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "prepare mlperf training data input resnet [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'prepare,mlperf,training,data,input,resnet' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "prepare mlperf training data input resnet[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
-    * `_mxnet.#`
-      - ENV variables:
-        - CM_MXNET_VERSION: `#`
-
-  * Group "**implementation**"
-    * **`_nvidia`** (default)
-      - ENV variables:
-        - CM_TMP_VARIATION: `nvidia`
-    * `_reference`
-      - ENV variables:
-        - CM_TMP_VARIATION: `reference`
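For instance, combining the implementation group with the `_mxnet.#` wildcard above and the `--data_dir` flag mapped just below (both the MXNet version and the path are hypothetical):

```bash
cm run script --tags=prepare,mlperf,training,data,input,resnet,_nvidia,_mxnet.1.9.1 --data_dir=$HOME/resnet_training_data
```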
- - - ##### Default variations - - `_nvidia` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--data_dir=value` → `CM_DATA_DIR=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run-nvidia.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/run-nvidia.sh) - * [run-reference.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/prepare-training-data-resnet/run-reference.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "prepare mlperf training data input resnet [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md b/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md deleted file mode 100644 index 79f70a3c0..000000000 --- a/docs/scripts/MLPerf-benchmark-support/preprocess-mlperf-inference-submission/index.md +++ /dev/null @@ -1,96 +0,0 @@ -# preprocess-mlperf-inference-submission -Automatically generated README for this automation recipe: **preprocess-mlperf-inference-submission** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/preprocess-mlperf-inference-submission/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlc,mlcommons,mlperf,inference,submission,mlperf-inference,processor,preprocessor,preprocess' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/preprocess-mlperf-inference-submission/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run mlc mlcommons mlperf inference submission mlperf-inference processor preprocessor preprocess " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md b/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md deleted file mode 100644 index bd5afa8b2..000000000 --- a/docs/scripts/MLPerf-benchmark-support/process-mlperf-accuracy/index.md +++ /dev/null @@ -1,177 +0,0 @@ -# process-mlperf-accuracy -Automatically generated README for this automation recipe: **process-mlperf-accuracy** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlperf mlcommons accuracy mlc process process-accuracy" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlperf mlcommons accuracy mlc process process-accuracy[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**coco-evaluation-tool**" -
-    * **`_default-pycocotools`** (default)
-    * `_nvidia-pycocotools`
-
-  * Group "**dataset**"
-    * `_cnndm`
-      - ENV variables:
-        - CM_DATASET: `cnndm`
-    * `_coco2014`
-      - ENV variables:
-        - CM_DATASET: `coco2014`
-    * **`_imagenet`** (default)
-      - ENV variables:
-        - CM_DATASET: `imagenet`
-    * `_kits19`
-      - ENV variables:
-        - CM_DATASET: `kits19`
-    * `_librispeech`
-      - ENV variables:
-        - CM_DATASET: `librispeech`
-    * `_open-orca`
-      - ENV variables:
-        - CM_DATASET: `openorca`
-    * `_openimages`
-      - ENV variables:
-        - CM_DATASET: `openimages`
-    * `_squad`
-      - ENV variables:
-        - CM_DATASET: `squad`
-    * `_terabyte`
-      - ENV variables:
-        - CM_DATASET: `squad`
-
-  * Group "**precision**"
-    * `_float16`
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `float16`
-    * **`_float32`** (default)
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `float32`
-    * `_float64`
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `float64`
-    * `_int16`
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `int16`
-    * `_int32`
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `int32`
-    * `_int64`
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `int64`
-    * `_int8`
-      - ENV variables:
-        - CM_ACCURACY_DTYPE: `int8`
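As an illustration, one variation from each group can be combined with the `--result_dir` flag mapped just below (the path is hypothetical):

```bash
cm run script --tags=run,mlperf,mlcommons,accuracy,mlc,process,process-accuracy,_openimages,_float16 --result_dir=$HOME/mlperf_results
```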
- - - ##### Default variations - - `_default-pycocotools,_float32,_imagenet` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--result_dir=value` → `CM_MLPERF_ACCURACY_RESULTS_DIR=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/process-mlperf-accuracy/run.bat) -___ -#### Script output -```bash -cmr "run mlperf mlcommons accuracy mlc process process-accuracy [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md b/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md deleted file mode 100644 index 2f3245a0b..000000000 --- a/docs/scripts/MLPerf-benchmark-support/push-mlperf-inference-results-to-github/index.md +++ /dev/null @@ -1,109 +0,0 @@ -# push-mlperf-inference-results-to-github -Automatically generated README for this automation recipe: **push-mlperf-inference-results-to-github** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/push-mlperf-inference-results-to-github/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "push mlperf mlperf-inference-results publish-results inference submission github" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=push,mlperf,mlperf-inference-results,publish-results,inference,submission,github [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'push,mlperf,mlperf-inference-results,publish-results,inference,submission,github' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "push mlperf mlperf-inference-results publish-results inference submission github" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--branch=value` → `CM_GIT_BRANCH=value` - * `--commit_message=value` → `CM_MLPERF_RESULTS_REPO_COMMIT_MESSAGE=value` - * `--repo_branch=value` → `CM_GIT_BRANCH=value` - * `--repo_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value` - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_MLPERF_RESULTS_GIT_REPO_URL: `https://github.com/ctuning/mlperf_inference_submissions_v4.0` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/push-mlperf-inference-results-to-github/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "push mlperf mlperf-inference-results publish-results inference submission github " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md b/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md deleted file mode 100644 index 35bd027de..000000000 --- a/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-mobilenet-models/index.md +++ /dev/null @@ -1,326 +0,0 @@ -# run-mlperf-inference-mobilenet-models -Automatically generated README for this automation recipe: **run-mlperf-inference-mobilenet-models** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - - ---- - -## Set up - -We need to get imagenet full dataset to make image-classification submissions for MLPerf inference. Since this dataset is not publicly available via a URL please follow the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) to download the dataset and register in CM. - -
-### Docker Setup
-
-CM commands are expected to run natively, but if you prefer not to modify the host system you can run the command below to set up a Docker container.
-
-```
-cm docker script --tags=run,mobilenet-models,_tflite,_accuracy-only \
---adr.compiler.tags=gcc \
---docker_cm_repo=mlcommons@cm4mlops \
---imagenet_path=$HOME/imagenet-2012-val \
---results_dir=$HOME/mobilenet_results \
---submission_dir=$HOME/inference_submission_3.1 \
---docker_skip_run_cmd
-```
-
-This command builds a Docker container and gives you an interactive shell from which you can execute the CM run commands below.
-* `results_dir`, `submission_dir` and `imagenet_path` are mounted from the host system.
-* `results_dir` and `submission_dir` are expected to be empty directories that the container will populate.
-* `imagenet_path` should point to the imagenet folder containing the 50000 validation images.
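For instance, you might pre-create the two host directories that the container expects to find empty (paths mirror the mounts above):

```bash
mkdir -p $HOME/mobilenet_results $HOME/inference_submission_3.1
```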
- -## Run Commands - -Since the runs can take many hours, in case you are running remotely you can install screen as follows. You may omit "screen" from all commands if you are running on a host system. -``` -cmr "get generic-sys-util _screen" -``` -### Default tflite - - -#### Do a full accuracy run for all the models (can take almost a day) - -``` -screen cmr "run mobilenet-models _tflite _accuracy-only" \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/mobilenet_results -``` - -#### Do a full performance run for all the models (can take almost a day) -``` -screen cmr "run mobilenet-models _tflite _performance-only" \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/mobilenet_results -``` - -#### Generate README files for all the runs -``` -cmr "run mobilenet-models _tflite _populate-readme" \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/mobilenet_results -``` - -#### Generate actual submission tree - -We should use the master branch of MLCommons inference repo for the submission checker. You can use `--hw_note_extra` option to add your name to the notes. -``` -cmr "generate inference submission" \ ---results_dir=$HOME/mobilenet_results/valid_results \ ---submission_dir=$HOME/mobilenet_submission_tree \ ---clean \ ---infer_scenario_results=yes \ ---adr.compiler.tags=gcc --adr.inference-src.version=master \ ---run-checker \ ---submitter=cTuning \ ---hw_notes_extra="Result taken by NAME" -``` -* Use `--hw_name="My system name"` to give a meaningful system name. Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems) - -#### Push the results to GitHub repo - -First, create a fork of [this repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/). Then run the following command after replacing `--repo_url` with your fork URL. -``` -cmr "push github mlperf inference submission" \ ---submission_dir=$HOME/mobilenet_submission_tree \ ---repo_url=https://github.com/ctuning/mlperf_inference_submissions_v3.1/ \ ---commit_message="Mobilenet results added" -``` - -Create a PR to [cTuning repo](https://github.com/ctuning/mlperf_inference_submissions_v3.1/) - -### Using ARMNN with NEON - -Follow the same procedure as above but for the first three experiment runs add `_armnn,_neon` to the tags. For example -``` -cmr "run mobilenet-models _tflite _armnn _neon _accuracy-only" \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/mobilenet_results -``` - -`results_dir` and `submission_dir` can be the same as before as results will be going to different subfolders. - -### Using ARMNN with OpenCL -Follow the same procedure as above but for the first three experiment runs add `_armnn,_opencl` to the tags. For example -``` -cmr "run mobilenet-models _tflite _armnn _opencl _accuracy-only" \ ---adr.compiler.tags=gcc \ ---results_dir=$HOME/mobilenet_results -``` - -`results_dir` and `submission_dir` can be the same as before as results will be going to different subfolders. - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-mobilenet-models/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mobilenet models image-classification mobilenet-models mlperf inference" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mobilenet,models,image-classification,mobilenet-models,mlperf,inference' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mobilenet models image-classification mobilenet-models mlperf inference[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_armnn` - - ENV variables: - - CM_MLPERF_USE_ARMNN_LIBRARY: `yes` - * `_neon` - - Aliases: `_use-neon` - - ENV variables: - - CM_MLPERF_USE_NEON: `yes` - * `_only-fp32` - - ENV variables: - - CM_MLPERF_RUN_INT8: `no` - * `_only-int8` - - ENV variables: - - CM_MLPERF_RUN_FP32: `no` - * `_opencl` - - ENV variables: - - CM_MLPERF_USE_OPENCL: `yes` - -
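-
-    Since these variations belong to no group, they can be freely combined. As an illustration (a sketch, not a command from the original docs), a run restricted to int8 models on ARMNN with NEON might look like:
-
-    ```bash
-    cmr "run mobilenet-models _tflite _armnn _neon _only-int8 _performance-only" \
-    --adr.compiler.tags=gcc \
-    --results_dir=$HOME/mobilenet_results
-    ```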
- - - * Group "**base-framework**" -
- Click here to expand this section. - - * **`_tflite`** (default) - -
- - - * Group "**model-selection**" -
- Click here to expand this section. - - * **`_all-models`** (default) - - ENV variables: - - CM_MLPERF_RUN_MOBILENETS: `yes` - - CM_MLPERF_RUN_EFFICIENTNETS: `yes` - * `_efficientnet` - - ENV variables: - - CM_MLPERF_RUN_EFFICIENTNETS: `yes` - * `_mobilenet` - - ENV variables: - - CM_MLPERF_RUN_MOBILENETS: `yes` - -
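-
-    For instance (an illustrative combination mirroring the run commands above), to benchmark only the EfficientNet family:
-
-    ```bash
-    cmr "run mobilenet-models _tflite _efficientnet _performance-only" \
-    --adr.compiler.tags=gcc \
-    --results_dir=$HOME/mobilenet_results
-    ```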
- - - * Group "**optimization**" -
- Click here to expand this section. - - * **`_tflite-default`** (default) - - ENV variables: - - CM_MLPERF_TFLITE_DEFAULT_MODE: `yes` - -
- - - * Group "**run-mode**" -
- Click here to expand this section. - - * `_accuracy-only` - - ENV variables: - - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` - - CM_MLPERF_ACCURACY_MODE: `yes` - - CM_MLPERF_SUBMISSION_MODE: `no` - * `_find-performance` - - ENV variables: - - CM_MLPERF_FIND_PERFORMANCE_MODE: `yes` - - CM_MLPERF_SUBMISSION_MODE: `no` - * `_performance-only` - - ENV variables: - - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` - - CM_MLPERF_PERFORMANCE_MODE: `yes` - - CM_MLPERF_SUBMISSION_MODE: `no` - * `_populate-readme` - - ENV variables: - - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` - - CM_MLPERF_POPULATE_README: `yes` - * `_submission` - - ENV variables: - - CM_MLPERF_FIND_PERFORMANCE_MODE: `no` - - CM_MLPERF_SUBMISSION_MODE: `yes` - -
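-
-    Taken together, these run modes suggest a staged flow (a sketch assembled from the modes above, not a prescribed sequence):
-
-    ```bash
-    # 1. Estimate achievable performance first
-    cmr "run mobilenet-models _tflite _find-performance" --adr.compiler.tags=gcc --results_dir=$HOME/mobilenet_results
-    # 2. Full accuracy run
-    cmr "run mobilenet-models _tflite _accuracy-only" --adr.compiler.tags=gcc --results_dir=$HOME/mobilenet_results
-    # 3. Full performance run
-    cmr "run mobilenet-models _tflite _performance-only" --adr.compiler.tags=gcc --results_dir=$HOME/mobilenet_results
-    # 4. Populate README files for the collected results
-    cmr "run mobilenet-models _tflite _populate-readme" --adr.compiler.tags=gcc --results_dir=$HOME/mobilenet_results
-    ```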
- - - ##### Default variations - - `_all-models,_tflite,_tflite-default` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--find-performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value` - * `--imagenet_path=value` → `IMAGENET_PATH=value` - * `--no-rerun=value` → `CM_MLPERF_NO_RERUN=value` - * `--power=value` → `CM_MLPERF_POWER=value` - * `--results_dir=value` → `CM_MLPERF_INFERENCE_RESULTS_DIR=value` - * `--submission=value` → `CM_MLPERF_SUBMISSION_MODE=value` - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_RUN_MOBILENETS: `no` - * CM_MLPERF_RUN_EFFICIENTNETS: `no` - * CM_MLPERF_NO_RERUN: `no` - * CM_MLPERF_RUN_FP32: `yes` - * CM_MLPERF_RUN_INT8: `yes` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-mobilenet-models/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run mobilenet models image-classification mobilenet-models mlperf inference [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md b/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md deleted file mode 100644 index 0231dcbd9..000000000 --- a/docs/scripts/MLPerf-benchmark-support/run-mlperf-inference-submission-checker/index.md +++ /dev/null @@ -1,138 +0,0 @@ -# run-mlperf-inference-submission-checker -Automatically generated README for this automation recipe: **run-mlperf-inference-submission-checker** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,submission,checker,submission-checker,mlc-submission-checker' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_short-run` - - ENV variables: - - CM_MLPERF_SHORT_RUN: `yes` - -
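-
-    For example (an illustrative invocation composed from the flags and versions documented below, with placeholder paths):
-
-    ```bash
-    cmr "run mlperf inference submission-checker" \
-    --submission_dir=$HOME/mobilenet_submission_tree \
-    --submitter=MLCommons \
-    --src_version=r4.0 \
-    --tar=yes
-    ```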
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value` - * `--extra_model_benchmark_map=value` → `CM_MLPERF_EXTRA_MODEL_MAPPING=value` - * `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--power=value` → `CM_MLPERF_POWER=value` - * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` - * `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value` - * `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value` - * `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value` - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_SHORT_RUN: `no` - - -#### Versions -Default version: `master` - -* `master` -* `r3.0` -* `r3.1` -* `r4.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-submission-checker/run.bat) -___ -#### Script output -```bash -cmr "run mlc mlcommons mlperf inference mlperf-inference submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md b/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md deleted file mode 100644 index 657bf339f..000000000 --- a/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-client/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# run-mlperf-power-client -Automatically generated README for this automation recipe: **run-mlperf-power-client** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlc mlcommons mlperf power client power-client" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlc,mlcommons,mlperf,power,client,power-client [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlc mlcommons mlperf power client power-client " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlc,mlcommons,mlperf,power,client,power-client' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlc mlcommons mlperf power client power-client" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--loadgen_logs_dir=value` → `CM_MLPERF_LOADGEN_LOGS_DIR=value` - * `--log_dir=value` → `CM_MLPERF_POWER_LOG_DIR=value` - * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` - * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` - * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` - * `--port=value` → `CM_MLPERF_POWER_SERVER_PORT=value` - * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` - * `--run_cmd=value` → `CM_MLPERF_RUN_CMD=value` - * `--server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` - * `--server_port=value` → `CM_MLPERF_POWER_SERVER_PORT=value` - * `--timestamp=value` → `CM_MLPERF_POWER_TIMESTAMP=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_POWER_LOG_DIR: `logs` - * CM_MLPERF_RUN_CMD: `` - * CM_MLPERF_POWER_SERVER_ADDRESS: `localhost` - * CM_MLPERF_POWER_NTP_SERVER: `time.google.com` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-client/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run mlc mlcommons mlperf power client power-client " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md b/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md deleted file mode 100644 index be12a1dd3..000000000 --- a/docs/scripts/MLPerf-benchmark-support/run-mlperf-power-server/index.md +++ /dev/null @@ -1,116 +0,0 @@ -# run-mlperf-power-server -Automatically generated README for this automation recipe: **run-mlperf-power-server** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlc mlcommons mlperf power server power-server" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlc,mlcommons,mlperf,power,server,power-server [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlc mlcommons mlperf power server power-server " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlc,mlcommons,mlperf,power,server,power-server' - 'out':'con', - ... 
- (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlc mlcommons mlperf power server power-server" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--device_port=value` → `CM_MLPERF_POWER_DEVICE_PORT=value` - * `--device_type=value` → `CM_MLPERF_POWER_DEVICE_TYPE=value` - * `--interface_flag=value` → `CM_MLPERF_POWER_INTERFACE_FLAG=value` - * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` - * `--screen=value` → `CM_MLPERF_POWER_SERVER_USE_SCREEN=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_POWER_NTP_SERVER: `time.google.com` - * CM_MLPERF_POWER_INTERFACE_FLAG: `` - * CM_MLPERF_POWER_DEVICE_TYPE: `49` - * CM_MLPERF_POWER_SERVER_ADDRESS: `0.0.0.0` - * CM_MLPERF_POWER_SERVER_PORT: `4950` - * CM_MLPERF_POWER_DEVICE_PORT: `/dev/usbtmc0` - * CM_MLPERF_POWER_SERVER_USE_SCREEN: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-power-server/run.bat) -___ -#### Script output -```bash -cmr "run mlc mlcommons mlperf power server power-server " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md b/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md deleted file mode 100644 index 863aeae5e..000000000 --- a/docs/scripts/MLPerf-benchmark-support/run-mlperf-training-submission-checker/index.md +++ /dev/null @@ -1,135 +0,0 @@ -# run-mlperf-training-submission-checker -Automatically generated README for this automation recipe: **run-mlperf-training-submission-checker** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-training-submission-checker/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlc,mlcommons,mlperf,training,train,mlperf-training,submission,checker,submission-checker,mlc-submission-checker' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_short-run` - - ENV variables: - - CM_MLPERF_SHORT_RUN: `yes` - -
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--extra_args=value` → `CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS=value` - * `--input=value` → `CM_MLPERF_SUBMISSION_DIR=value` - * `--power=value` → `CM_MLPERF_POWER=value` - * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` - * `--skip_compliance=value` → `CM_MLPERF_SKIP_COMPLIANCE=value` - * `--skip_power_check=value` → `CM_MLPERF_SKIP_POWER_CHECK=value` - * `--src_version=value` → `CM_MLPERF_SUBMISSION_CHECKER_VERSION=value` - * `--submission_dir=value` → `CM_MLPERF_SUBMISSION_DIR=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - * `--tar=value` → `CM_TAR_SUBMISSION_DIR=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_SHORT_RUN: `no` - - -#### Versions -Default version: `master` - -* `master` -* `r3.0` -* `r3.1` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-training-submission-checker/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run mlc mlcommons mlperf training train mlperf-training submission checker submission-checker mlc-submission-checker [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md b/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md deleted file mode 100644 index d0921c6d5..000000000 --- a/docs/scripts/MLPerf-benchmark-support/truncate-mlperf-inference-accuracy-log/index.md +++ /dev/null @@ -1,98 +0,0 @@ -# truncate-mlperf-inference-accuracy-log -Automatically generated README for this automation recipe: **truncate-mlperf-inference-accuracy-log** - -Category: **[MLPerf benchmark support](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,mlc,mlcommons,mlperf,inference,mlperf-inference,truncation,truncator,truncate,accuracy,accuracy-log,accuracy-log-trancation,accuracy-log-truncator,mlc-accuracy-log-trancation,mlc-accuracy-log-truncator' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/truncate-mlperf-inference-accuracy-log/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run mlc mlcommons mlperf inference mlperf-inference truncation truncator truncate accuracy accuracy-log accuracy-log-trancation accuracy-log-truncator mlc-accuracy-log-trancation mlc-accuracy-log-truncator " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md b/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md deleted file mode 100644 index 981a09f07..000000000 --- a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-onnx-py/index.md +++ /dev/null @@ -1,138 +0,0 @@ -# app-image-classification-onnx-py -Automatically generated README for this automation recipe: **app-image-classification-onnx-py** - -Category: **[Modular AI/ML application pipeline](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: 
[*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "modular python app image-classification onnx" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=modular,python,app,image-classification,onnx[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "modular python app image-classification onnx [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'modular,python,app,image-classification,onnx' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "modular python app image-classification onnx[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**target**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - USE_CPU: `True` - * `_cuda` - - ENV variables: - - USE_CUDA: `True` - -
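-
-    For example (a sketch with a placeholder image path; `--input` maps to `CM_IMAGE` as listed below):
-
-    ```bash
-    cmr "modular python app image-classification onnx _cuda" \
-    --input=/path/to/image.jpg -j
-    ```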
- - - ##### Default variations - - `_cpu` -=== "Input Flags" - - - #### Input Flags - - * --**input:** Path to JPEG image to classify - * --**output:** Output directory (optional) - * --**j:** Print JSON output -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--input=value` → `CM_IMAGE=value` - * `--output=value` → `CM_APP_IMAGE_CLASSIFICATION_ONNX_PY_OUTPUT=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-onnx-py/run.bat) -___ -#### Script output -```bash -cmr "modular python app image-classification onnx [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md b/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md deleted file mode 100644 index bdb43e6f0..000000000 --- a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tf-onnx-cpp/index.md +++ /dev/null @@ -1,98 +0,0 @@ -# app-image-classification-tf-onnx-cpp -Automatically generated README for this automation recipe: **app-image-classification-tf-onnx-cpp** - -Category: **[Modular AI/ML application pipeline](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app image-classification cpp tensorflow onnx" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,image-classification,cpp,tensorflow,onnx - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app image-classification cpp tensorflow onnx " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,image-classification,cpp,tensorflow,onnx' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app image-classification cpp tensorflow onnx" - ``` -___ - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tf-onnx-cpp/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "app image-classification cpp tensorflow onnx " -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md b/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md deleted file mode 100644 index 9c96b5ef2..000000000 --- a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-torch-py/index.md +++ /dev/null @@ -1,113 +0,0 @@ -# app-image-classification-torch-py -Automatically generated README for this automation recipe: **app-image-classification-torch-py** - -Category: **[Modular AI/ML application pipeline](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app image-classification python torch" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,image-classification,python,torch[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app image-classification python torch [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,image-classification,python,torch' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app image-classification python torch[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_cuda` - - ENV variables: - - USE_CUDA: `yes` - -
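-
-    As an illustration (not from the original docs), the default environment keys listed below can be overridden on the command line:
-
-    ```bash
-    cmr "app image-classification python torch _cuda" \
-    --env.CM_BATCH_SIZE=2
-    ```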
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-torch-py/run.bat) -___ -#### Script output -```bash -cmr "app image-classification python torch [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md b/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md deleted file mode 100644 index 37f6b98a5..000000000 --- a/docs/scripts/Modular-AI-ML-application-pipeline/app-image-classification-tvm-onnx-py/index.md +++ /dev/null @@ -1,114 +0,0 @@ -# app-image-classification-tvm-onnx-py -Automatically generated README for this automation recipe: **app-image-classification-tvm-onnx-py** - -Category: **[Modular AI/ML application pipeline](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app image-classification python tvm-onnx" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,image-classification,python,tvm-onnx[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app image-classification python tvm-onnx [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,image-classification,python,tvm-onnx' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app image-classification python tvm-onnx[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_cuda` - - ENV variables: - - USE_CUDA: `yes` - * `_llvm` - -
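-
-    For example (illustrative only), to compile and run with the LLVM target on CPU:
-
-    ```bash
-    cmr "app image-classification python tvm-onnx _llvm"
-    ```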
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-classification-tvm-onnx-py/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "app image-classification python tvm-onnx [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md b/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md deleted file mode 100644 index af2093236..000000000 --- a/docs/scripts/Modular-AI-ML-application-pipeline/app-stable-diffusion-onnx-py/index.md +++ /dev/null @@ -1,128 +0,0 @@ -# app-stable-diffusion-onnx-py -Automatically generated README for this automation recipe: **app-stable-diffusion-onnx-py** - -Category: **[Modular AI/ML application pipeline](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "modular python app stable-diffusion onnx" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=modular,python,app,stable-diffusion,onnx[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "modular python app stable-diffusion onnx [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'modular,python,app,stable-diffusion,onnx' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "modular python app stable-diffusion onnx[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**target**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - USE_CPU: `True` - - CM_DEVICE: `cpu` - * `_cuda` - - ENV variables: - - USE_CUDA: `True` - - CM_DEVICE: `cuda:0` - -
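-
-    For example (a sketch with placeholder prompt text and output directory; the `--text` and `--output` flags are documented below):
-
-    ```bash
-    cmr "modular python app stable-diffusion onnx _cuda" \
-    --text="a photo of a red bicycle" --output=$HOME/sd_output
-    ```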
- - - ##### Default variations - - `_cpu` -=== "Input Flags" - - - #### Input Flags - - * --**text:** Text to generate image - * --**output:** Output directory -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--output=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT=value` - * `--text=value` → `CM_APP_STABLE_DIFFUSION_ONNX_PY_TEXT=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-stable-diffusion-onnx-py/run.bat) -___ -#### Script output -```bash -cmr "modular python app stable-diffusion onnx [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-AI-ML-application-pipeline/index.md b/docs/scripts/Modular-AI-ML-application-pipeline/index.md deleted file mode 100644 index 3de2f8ac6..000000000 --- a/docs/scripts/Modular-AI-ML-application-pipeline/index.md +++ /dev/null @@ -1,5 +0,0 @@ -* [app-image-classification-onnx-py](app-image-classification-onnx-py/index.md) -* [app-image-classification-tf-onnx-cpp](app-image-classification-tf-onnx-cpp/index.md) -* [app-image-classification-torch-py](app-image-classification-torch-py/index.md) -* [app-image-classification-tvm-onnx-py](app-image-classification-tvm-onnx-py/index.md) -* [app-stable-diffusion-onnx-py](app-stable-diffusion-onnx-py/index.md) diff --git a/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md b/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md deleted file mode 100644 index b78e6db15..000000000 --- a/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-dummy/index.md +++ /dev/null @@ -1,235 +0,0 @@ -# app-mlperf-inference-dummy -Automatically generated README for this automation recipe: **app-mlperf-inference-dummy** - -Category: **[Modular MLPerf benchmarks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-dummy/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'reproduce,mlcommons,mlperf,inference,harness,dummy-harness,dummy' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "reproduce mlcommons mlperf inference harness dummy-harness dummy[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**backend**" -
- Click here to expand this section. - - * **`_pytorch`** (default) - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - -
- - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_bs.#` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - * `_cuda` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * `_multistream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` - * `_offline` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * `_server` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Server` - * `_singlestream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_bert-99` - - ENV variables: - - CM_MODEL: `bert-99` - - CM_SQUAD_ACCURACY_DTYPE: `float32` - * `_bert-99.9` - - ENV variables: - - CM_MODEL: `bert-99.9` - * `_gptj-99` - - ENV variables: - - CM_MODEL: `gptj-99` - - CM_SQUAD_ACCURACY_DTYPE: `float32` - * `_gptj-99.9` - - ENV variables: - - CM_MODEL: `gptj-99.9` - * `_llama2-70b-99` - - ENV variables: - - CM_MODEL: `llama2-70b-99` - * `_llama2-70b-99.9` - - ENV variables: - - CM_MODEL: `llama2-70b-99.9` - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * `_fp16` - * `_fp32` - * `_uint8` - -
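-
-    Putting the groups together, one variation may be picked from each group; an illustrative combination (not from the original docs) using the `--mode` and `--count` flags documented below:
-
-    ```bash
-    cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy _pytorch _cpu _offline _bert-99 _fp32" \
-    --mode=accuracy --count=100
-    ```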
- - - ##### Default variations - - `_cpu,_pytorch,_resnet50` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--rerun=value` → `CM_RERUN=value` - * `--results_repo=value` → `CM_MLPERF_INFERENCE_RESULTS_REPO=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_MLPERF_LOADGEN_MODE: `performance` - * CM_SKIP_PREPROCESS_DATASET: `no` - * CM_SKIP_MODEL_DOWNLOAD: `no` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `dummy_harness` - * CM_MLPERF_SKIP_RUN: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-dummy/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce mlcommons mlperf inference harness dummy-harness dummy [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md b/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md deleted file mode 100644 index 6bab8f909..000000000 --- a/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-intel/index.md +++ /dev/null @@ -1,347 +0,0 @@ -# app-mlperf-inference-intel -Automatically generated README for this automation recipe: **app-mlperf-inference-intel** - -Category: **[Modular MLPerf benchmarks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'reproduce,mlcommons,mlperf,inference,harness,intel-harness,intel,intel-harness,intel' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_bs.#` - - ENV variables: - - ML_MLPERF_MODEL_BATCH_SIZE: `#` - * `_v3.1` - - ENV variables: - - CM_MLPERF_INFERENCE_CODE_VERSION: `v3.1` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - -
- - - * Group "**framework**" -
- Click here to expand this section. - - * **`_pytorch`** (default) - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - - CM_MLPERF_BACKEND_LIB_NAMESPEC: `pytorch` - -
- - - * Group "**loadgen-batchsize**" -
- Click here to expand this section. - - * `_batch_size.#` - - ENV variables: - - CM_MLPERF_LOADGEN_BATCH_SIZE: `#` - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * `_multistream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` - * `_offline` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * `_server` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Server` - * `_singlestream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_bert-99` - - ENV variables: - - CM_MODEL: `bert-99` - - CM_SQUAD_ACCURACY_DTYPE: `float32` - - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` - * `_bert-99.9` - - ENV variables: - - CM_MODEL: `bert-99.9` - - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` - * `_gptj-99` - - ENV variables: - - CM_MODEL: `gptj-99` - - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` - - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8` - - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8` - * `_gptj-99.9` - - ENV variables: - - CM_MODEL: `gptj-99.9` - - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - - dataset_imagenet_preprocessed_input_square_side: `224` - - ml_model_has_background_class: `YES` - - ml_model_image_height: `224` - - loadgen_buffer_size: `1024` - - loadgen_dataset_size: `50000` - - CM_BENCHMARK: `STANDALONE_CLASSIFICATION` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` - - dataset_imagenet_preprocessed_input_square_side: `224` - - ml_model_image_height: `800` - - ml_model_image_width: `800` - - loadgen_buffer_size: `64` - - loadgen_dataset_size: `24576` - - CM_BENCHMARK: `STANDALONE_OBJECT_DETECTION` - -
- - - * Group "**network-mode**" -
- Click here to expand this section. - - * `_network-server` - - ENV variables: - - CM_MLPERF_NETWORK_RUN_MODE: `network-server` - * **`_standalone`** (default) - - ENV variables: - - CM_MLPERF_NETWORK_RUN_MODE: `standalone` - -
- - - * Group "**network-run-mode**" -
- Click here to expand this section. - - * `_network-client` - - ENV variables: - - CM_MLPERF_NETWORK_RUN_MODE: `network-client` - -
- - - * Group "**power-mode**" -
- Click here to expand this section. - - * `_maxn` - - ENV variables: - - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True` - * `_maxq` - - ENV variables: - - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * `_fp32` - - ENV variables: - - CM_IMAGENET_ACCURACY_DTYPE: `float32` - * `_int4` - * `_uint8` - -
- - - * Group "**run-mode**" -
- Click here to expand this section. - - * `_build-harness` - - ENV variables: - - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `build_harness` - * `_calibration` - - ENV variables: - - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `calibration` - * **`_run-harness`** (default) - - ENV variables: - - CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE: `run_harness` - -
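-
-    These run modes suggest a staged workflow (a sketch under the assumption that the harness is built and calibrated before benchmarking, not an officially documented sequence):
-
-    ```bash
-    # Build the harness binaries first
-    cmr "reproduce mlcommons mlperf inference harness intel _build-harness _resnet50"
-    # Optionally run calibration
-    cmr "reproduce mlcommons mlperf inference harness intel _calibration _resnet50"
-    # Then run the benchmark harness itself (the default mode)
-    cmr "reproduce mlcommons mlperf inference harness intel _run-harness _resnet50 _offline"
-    ```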
- - - * Group "**sut**" -
- Click here to expand this section. - - * `_sapphire-rapids.112c` - - ENV variables: - - WARMUP: ` --warmup` - * `_sapphire-rapids.24c` - -
- - - * Group "**version**" -
- Click here to expand this section. - - * **`_v4.0`** (default) - - ENV variables: - - CM_MLPERF_INFERENCE_CODE_VERSION: `v4.0` - -
- - - ##### Default variations - - `_cpu,_pytorch,_resnet50,_run-harness,_standalone,_v4.0` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--rerun=value` → `CM_RERUN=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - * CM_FAST_COMPILATION: `yes` - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_MLPERF_LOADGEN_MODE: `performance` - * CM_SKIP_PREPROCESS_DATASET: `no` - * CM_SKIP_MODEL_DOWNLOAD: `no` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `intel` - * CM_MLPERF_SKIP_RUN: `no` - * verbosity: `1` - * loadgen_trigger_cold_run: `0` - - - -#### Native script being run -=== "Linux/macOS" - * [run_bert_harness.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_bert_harness.sh) - * [run_gptj_harness_v3_1.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_gptj_harness_v3_1.sh) - * [run_gptj_harness_v4_0.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-intel/run_gptj_harness_v4_0.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce mlcommons mlperf inference harness intel-harness intel intel-harness intel [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md b/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md deleted file mode 100644 index b46ea4677..000000000 --- a/docs/scripts/Modular-MLPerf-benchmarks/app-mlperf-inference-qualcomm/index.md +++ /dev/null @@ -1,368 +0,0 @@ -# app-mlperf-inference-qualcomm -Automatically generated README for this automation recipe: **app-mlperf-inference-qualcomm** - -Category: **[Modular MLPerf benchmarks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-qualcomm/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_activation-count.#` - - ENV variables: - - CM_MLPERF_QAIC_ACTIVATION_COUNT: `#` - * `_num-devices.4` - - ENV variables: - - CM_QAIC_DEVICES: `0,1,2,3` - * `_pro` - - ENV variables: - - qaic_queue_length: `10` - -
- - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_bs.#` - - ENV variables: - - kilt_model_batch_size: `#` - * `_bs.0` - - ENV variables: - - kilt_model_batch_size: `1` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - - kilt_backend_type: `cpu` - * `_cuda` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` - - kilt_backend_type: `gpu` - * `_qaic` - - ENV variables: - - CM_MLPERF_DEVICE: `qaic` - - CM_MLPERF_DEVICE_LIB_NAMESPEC: `QAic` - - kilt_backend_type: `qaic` - -
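-
-    For a quick functional test (an illustrative combination, not from the original docs), the harness can be exercised on CPU with the default ONNX Runtime backend before targeting QAIC devices:
-
-    ```bash
-    cmr "reproduce mlcommons mlperf inference harness kilt _cpu _onnxruntime _resnet50 _singlestream"
-    ```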
- - - * Group "**framework**" -
- Click here to expand this section. - - * `_glow` - - ENV variables: - - device: `qaic` - - CM_MLPERF_BACKEND: `glow` - - CM_MLPERF_BACKEND_LIB_NAMESPEC: `QAic` - * **`_onnxruntime`** (default) - - ENV variables: - - device: `onnxrt` - - CM_MLPERF_BACKEND: `onnxruntime` - - CM_MLPERF_BACKEND_LIB_NAMESPEC: `onnxruntime` - * `_tensorrt` - - ENV variables: - - CM_MLPERF_BACKEND: `tensorrt` - - device: `tensorrt` - - CM_MLPERF_BACKEND_NAME: `TensorRT` - -
- - - * Group "**loadgen-batch-size**" -
- Click here to expand this section. - - * `_loadgen-batch-size.#` - - ENV variables: - - CM_MLPERF_LOADGEN_BATCH_SIZE: `#` - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * `_multistream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` - * `_offline` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * `_server` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Server` - * `_singlestream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_bert-99` - - ENV variables: - - CM_MODEL: `bert-99` - - CM_SQUAD_ACCURACY_DTYPE: `float32` - - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx` - * `_bert-99.9` - - ENV variables: - - CM_MODEL: `bert-99.9` - - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx` - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - - kilt_model_name: `resnet50` - - kilt_input_count: `1` - - kilt_output_count: `1` - - kilt_input_format: `FLOAT32,-1,224,224,3` - - kilt_output_format: `INT64,-1` - - dataset_imagenet_preprocessed_input_square_side: `224` - - ml_model_has_background_class: `YES` - - ml_model_image_height: `224` - - loadgen_buffer_size: `1024` - - loadgen_dataset_size: `50000` - - CM_BENCHMARK: `STANDALONE_CLASSIFICATION` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth` - - kilt_model_name: `retinanet` - - kilt_input_count: `1` - - kilt_model_max_detections: `600` - - kilt_output_count: `1` - - kilt_input_format: `FLOAT32,-1,3,800,800` - - kilt_output_format: `INT64,-1` - - dataset_imagenet_preprocessed_input_square_side: `224` - - ml_model_image_height: `800` - - ml_model_image_width: `800` - - loadgen_buffer_size: `64` - - loadgen_dataset_size: `24576` - - CM_BENCHMARK: `STANDALONE_OBJECT_DETECTION` - -
- - - * Group "**nsp**" -
- Click here to expand this section. - - * `_nsp.#` - * `_nsp.14` - * `_nsp.16` - -
- - - * Group "**power-mode**" -
- Click here to expand this section. - - * `_maxn` - - ENV variables: - - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True` - * `_maxq` - - ENV variables: - - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * `_fp16` - * `_fp32` - - ENV variables: - - CM_IMAGENET_ACCURACY_DTYPE: `float32` - * `_uint8` - -
- - - * Group "**run-mode**" -
- Click here to expand this section. - - * `_network-client` - - ENV variables: - - CM_RUN_MODE: `network-client` - * `_network-server` - - ENV variables: - - CM_RUN_MODE: `network-server` - * **`_standalone`** (default) - - ENV variables: - - CM_RUN_MODE: `standalone` - -
- - - * Group "**sut**" -
- Click here to expand this section. - - * `_dl2q.24xlarge` - - ENV variables: - - CM_QAIC_DEVICES: `0,1,2,3,4,5,6,7` - - qaic_queue_length: `4` - * `_rb6` - - ENV variables: - - CM_QAIC_DEVICES: `0` - - qaic_queue_length: `6` - -
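
Variations from different groups compose by appending them to the tag list. A hedged sketch targeting the `dl2q.24xlarge` SUT listed above (an illustrative combination, not a tested configuration):

```bash
# QAIC device + ResNet50 model + Offline scenario on the dl2q.24xlarge SUT
cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,qualcomm-harness,qualcomm,kilt-harness,kilt,_qaic,_resnet50,_offline,_dl2q.24xlarge --quiet
```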
- - - ##### Default variations - - `_cpu,_onnxruntime,_resnet50,_standalone` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--devices=value` → `CM_QAIC_DEVICES=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--rerun=value` → `CM_RERUN=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - * CM_FAST_COMPILATION: `yes` - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_MLPERF_LOADGEN_MODE: `performance` - * CM_SKIP_PREPROCESS_DATASET: `no` - * CM_SKIP_MODEL_DOWNLOAD: `no` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `kilt` - * CM_MLPERF_SKIP_RUN: `no` - * CM_KILT_REPO_URL: `https://github.com/GATEOverflow/kilt-mlperf` - * CM_QAIC_DEVICES: `0` - * kilt_max_wait_abs: `10000` - * verbosity: `0` - * loadgen_trigger_cold_run: `0` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-qualcomm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce mlcommons mlperf inference harness qualcomm-harness qualcomm kilt-harness kilt [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-benchmarks/index.md b/docs/scripts/Modular-MLPerf-benchmarks/index.md deleted file mode 100644 index 9675eae16..000000000 --- a/docs/scripts/Modular-MLPerf-benchmarks/index.md +++ /dev/null @@ -1,3 +0,0 @@ -* [app-mlperf-inference-dummy](app-mlperf-inference-dummy/index.md) -* [app-mlperf-inference-intel](app-mlperf-inference-intel/index.md) -* [app-mlperf-inference-qualcomm](app-mlperf-inference-qualcomm/index.md) diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md deleted file mode 100644 index 1a08adcbe..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-loadgen-generic-python/index.md +++ /dev/null @@ -1,213 +0,0 @@ -# app-loadgen-generic-python -Automatically generated README for this automation recipe: **app-loadgen-generic-python** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - -Developers: [Gaz 
Iqbal](https://www.linkedin.com/in/gaziqbal), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "python app generic loadgen" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=python,app,generic,loadgen[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "python app generic loadgen [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'python,app,generic,loadgen' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "python app generic loadgen[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_cmc` - - ENV variables: - - CM_CUSTOM_MODEL_CMC: `True` - * `_huggingface` - - ENV variables: - - CM_CUSTOM_MODEL_SOURCE: `huggingface` - * `_model-stub.#` - - ENV variables: - - CM_ML_MODEL_STUB: `#` - -
- - - * Group "**backend**" -
- Click here to expand this section. - - * **`_onnxruntime`** (default) - - ENV variables: - - CM_MLPERF_BACKEND: `onnxruntime` - * `_pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - - CM_MLPERF_EXECUTION_PROVIDER: `CPUExecutionProvider` - * `_cuda` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - - CM_MLPERF_EXECUTION_PROVIDER: `CUDAExecutionProvider` - -
- - - * Group "**models**" -
- Click here to expand this section. - - * `_custom` - - ENV variables: - - CM_MODEL: `custom` - * `_resnet50` - - ENV variables: - - CM_MODEL: `resnet50` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - -
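
A sketch of pointing the script at a local model file through the input flags documented below; `/path/to/model.onnx` is a hypothetical placeholder path:

```bash
# --modelpath maps to CM_ML_MODEL_FILE_WITH_PATH and --samples to
# CM_MLPERF_LOADGEN_SAMPLES (see the flag mapping below)
cm run script --tags=python,app,generic,loadgen,_onnxruntime,_custom --modelpath=/path/to/model.onnx --samples=2 -j
```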
- - - ##### Default variations - - `_cpu,_onnxruntime` -=== "Input Flags" - - - #### Input Flags - - * --**modelpath:** Full path to file with model weights - * --**modelcodepath:** (for PyTorch models) Full path to file with model code and cmc.py - * --**modelcfgpath:** (for PyTorch models) Full path to JSON file with model cfg - * --**modelsamplepath:** (for PyTorch models) Full path to file with model sample in pickle format - * --**ep:** ONNX Execution provider - * --**scenario:** MLPerf LoadGen scenario - * --**samples:** Number of samples (*2*) - * --**runner:** MLPerf runner - * --**execmode:** MLPerf exec mode - * --**output_dir:** MLPerf output directory - * --**concurrency:** MLPerf concurrency - * --**intraop:** MLPerf intra op threads - * --**interop:** MLPerf inter op threads -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--concurrency=value` → `CM_MLPERF_CONCURRENCY=value` - * `--ep=value` → `CM_MLPERF_EXECUTION_PROVIDER=value` - * `--execmode=value` → `CM_MLPERF_EXEC_MODE=value` - * `--interop=value` → `CM_MLPERF_INTEROP=value` - * `--intraop=value` → `CM_MLPERF_INTRAOP=value` - * `--loadgen_duration_sec=value` → `CM_MLPERF_LOADGEN_DURATION_SEC=value` - * `--loadgen_expected_qps=value` → `CM_MLPERF_LOADGEN_EXPECTED_QPS=value` - * `--modelcfg=value` → `CM_ML_MODEL_CFG=value` - * `--modelcfgpath=value` → `CM_ML_MODEL_CFG_WITH_PATH=value` - * `--modelcodepath=value` → `CM_ML_MODEL_CODE_WITH_PATH=value` - * `--modelpath=value` → `CM_ML_MODEL_FILE_WITH_PATH=value` - * `--modelsamplepath=value` → `CM_ML_MODEL_SAMPLE_WITH_PATH=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--runner=value` → `CM_MLPERF_RUNNER=value` - * `--samples=value` → `CM_MLPERF_LOADGEN_SAMPLES=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_EXECUTION_MODE: `parallel` - * CM_MLPERF_BACKEND: `onnxruntime` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/app-loadgen-generic-python/run.bat) -___ -#### Script output -```bash -cmr "python app generic loadgen [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md deleted file mode 100644 index 9e1044451..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-ctuning-cpp-tflite/index.md +++ /dev/null @@ -1,236 +0,0 @@ -# app-mlperf-inference-ctuning-cpp-tflite -Automatically generated README for this automation recipe: **app-mlperf-inference-ctuning-cpp-tflite** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app mlperf inference tflite-cpp" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,mlperf,inference,tflite-cpp[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app mlperf inference tflite-cpp [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,mlperf,inference,tflite-cpp' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app mlperf inference tflite-cpp[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_armnn` - - ENV variables: - - CM_MLPERF_TFLITE_USE_ARMNN: `yes` - - CM_TMP_LINK_LIBS: `tensorflowlite,armnn` - -
- - - * Group "**backend**" -
- Click here to expand this section. - - * `_tf` - - ENV variables: - - CM_MLPERF_BACKEND: `tf` - * **`_tflite`** (default) - - ENV variables: - - CM_MLPERF_BACKEND: `tflite` - - CM_MLPERF_BACKEND_VERSION: `master` - - CM_TMP_LINK_LIBS: `tensorflowlite` - - CM_TMP_SRC_FOLDER: `src` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - * `_gpu` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * **`_singlestream`** (default) - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_efficientnet` - - ENV variables: - - CM_MODEL: `efficientnet` - * `_mobilenet` - - ENV variables: - - CM_MODEL: `mobilenet` - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - -
- - - * Group "**optimization-target**" -
- Click here to expand this section. - - * `_use-neon` - - ENV variables: - - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `using_neon` - - CM_MLPERF_TFLITE_USE_NEON: `1` - * `_use-opencl` - - ENV variables: - - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `using_opencl` - - CM_MLPERF_TFLITE_USE_OPENCL: `1` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * **`_fp32`** (default) - - ENV variables: - - CM_MLPERF_MODEL_PRECISION: `float32` - * `_int8` - - ENV variables: - - CM_DATASET_COMPRESSED: `on` - - CM_MLPERF_MODEL_PRECISION: `int8` - * `_uint8` - - ENV variables: - - CM_DATASET_COMPRESSED: `on` - - CM_MLPERF_MODEL_PRECISION: `uint8` - -
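
Because `_armnn` sits outside any group, it can be combined freely with group variations; an illustrative (untested) composition:

```bash
# ArmNN build with NEON enabled, uint8 ResNet50, SingleStream scenario
cm run script --tags=app,mlperf,inference,tflite-cpp,_armnn,_use-neon,_uint8,_resnet50,_singlestream --quiet
```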
- - - ##### Default variations - - `_cpu,_fp32,_resnet50,_singlestream,_tflite` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--compressed_dataset=value` → `CM_DATASET_COMPRESSED=value` - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - * `--verbose=value` → `CM_VERBOSE=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_DATASET_COMPRESSED: `off` - * CM_DATASET_INPUT_SQUARE_SIDE: `224` - * CM_FAST_COMPILATION: `yes` - * CM_LOADGEN_BUFFER_SIZE: `1024` - * CM_MLPERF_LOADGEN_MODE: `accuracy` - * CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - * CM_MLPERF_LOADGEN_TRIGGER_COLD_RUN: `0` - * CM_MLPERF_OUTPUT_DIR: `.` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `tflite_cpp` - * CM_MLPERF_TFLITE_USE_NEON: `0` - * CM_MLPERF_TFLITE_USE_OPENCL: `0` - * CM_ML_MODEL_GIVEN_CHANNEL_MEANS: `123.68 116.78 103.94` - * CM_ML_MODEL_NORMALIZE_DATA: `0` - * CM_ML_MODEL_SUBTRACT_MEANS: `1` - * CM_VERBOSE: `0` - - - -___ -#### Script output -```bash -cmr "app mlperf inference tflite-cpp [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md deleted file mode 100644 index a2c71b5f5..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-cpp/index.md +++ /dev/null @@ -1,204 +0,0 @@ -# app-mlperf-inference-mlcommons-cpp -Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-cpp** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - -Developers: [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-cpp/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app mlcommons mlperf inference cpp" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,mlcommons,mlperf,inference,cpp[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,mlcommons,mlperf,inference,cpp' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app mlcommons mlperf inference cpp[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_batch-size.#` - - ENV variables: - - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - * `_cuda` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart` - -
- - - * Group "**framework**" -
- Click here to expand this section. - - * **`_onnxruntime`** (default) - - ENV variables: - - CM_MLPERF_BACKEND: `onnxruntime` - - CM_MLPERF_BACKEND_LIB_NAMESPEC: `onnxruntime` - * `_pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - * `_tf` - - ENV variables: - - CM_MLPERF_BACKEND: `tf` - * `_tflite` - - ENV variables: - - CM_MLPERF_BACKEND: `tflite` - * `_tvm-onnx` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-onnx` - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * `_multistream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` - * **`_offline`** (default) - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * `_server` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Server` - * `_singlestream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `1` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - -
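
An illustrative invocation combining these variations with an input flag (`--count` maps to `CM_MLPERF_LOADGEN_QUERY_COUNT` in the flag mapping below):

```bash
# RetinaNet in the Offline scenario, limited to 100 LoadGen queries
cm run script --tags=app,mlcommons,mlperf,inference,cpp,_retinanet,_offline --count=100 --quiet
```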
- - - ##### Default variations - - `_cpu,_offline,_onnxruntime,_resnet50` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - * CM_FAST_COMPILATION: `yes` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `cpp` - - - -___ -#### Script output -```bash -cmr "app mlcommons mlperf inference cpp [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md deleted file mode 100644 index 4206a8ee0..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference-mlcommons-python/index.md +++ /dev/null @@ -1,392 +0,0 @@ -# app-mlperf-inference-mlcommons-python -Automatically generated README for this automation recipe: **app-mlperf-inference-mlcommons-python** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - -Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python/README-extra.md) - - ---- - -This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) -to modularize the *python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference) -using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck). -The goal is to make it easier to run, optimize and reproduce MLPerf benchmarks -across diverse platforms with continuously changing software and hardware. - -See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage). - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-mlcommons-python/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app vision language mlcommons mlperf inference reference ref" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,vision,language,mlcommons,mlperf,inference,reference,ref' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app vision language mlcommons mlperf inference reference ref[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_3d-unet` - - ENV variables: - - CM_TMP_IGNORE_MLPERF_QUERY_COUNT: `True` - - CM_MLPERF_MODEL_SKIP_BATCHING: `True` - * `_beam_size.#` - - ENV variables: - - GPTJ_BEAM_SIZE: `#` - * `_bert` - - ENV variables: - - CM_MLPERF_MODEL_SKIP_BATCHING: `True` - * `_dlrm` - - ENV variables: - - CM_MLPERF_MODEL_SKIP_BATCHING: `True` - * `_multistream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` - * `_offline` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * `_r2.1_default` - - ENV variables: - - CM_RERUN: `yes` - - CM_SKIP_SYS_UTILS: `yes` - - CM_TEST_QUERY_COUNT: `100` - * `_server` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Server` - * `_singlestream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - -
- - - * Group "**batch-size**" -
- Click here to expand this section. - - * `_batch_size.#` - - ENV variables: - - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#` - -
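
The trailing `#` in `_batch_size.#` is a placeholder for a user-supplied value, so the variation is instantiated by substituting a number; a minimal sketch:

```bash
# _batch_size.8 resolves to CM_MLPERF_LOADGEN_MAX_BATCHSIZE=8
cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref,_batch_size.8 --quiet
```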
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - - CUDA_VISIBLE_DEVICES: `` - - USE_CUDA: `False` - - USE_GPU: `False` - * `_cuda` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - - USE_CUDA: `True` - - USE_GPU: `True` - * `_rocm` - - ENV variables: - - CM_MLPERF_DEVICE: `rocm` - - USE_GPU: `True` - * `_tpu` - - ENV variables: - - CM_MLPERF_DEVICE: `tpu` - -
- - - * Group "**framework**" -
- Click here to expand this section. - - * `_deepsparse` - - ENV variables: - - CM_MLPERF_BACKEND: `deepsparse` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_ncnn` - - ENV variables: - - CM_MLPERF_BACKEND: `ncnn` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - - CM_MLPERF_VISION_DATASET_OPTION: `imagenet_pytorch` - * **`_onnxruntime`** (default) - - ENV variables: - - CM_MLPERF_BACKEND: `onnxruntime` - * `_pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_ray` - - ENV variables: - - CM_MLPERF_BACKEND: `ray` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_tf` - - Aliases: `_tensorflow` - - ENV variables: - - CM_MLPERF_BACKEND: `tf` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_tflite` - - ENV variables: - - CM_MLPERF_BACKEND: `tflite` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - - CM_MLPERF_VISION_DATASET_OPTION: `imagenet_tflite_tpu` - * `_tvm-onnx` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-onnx` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_tvm-pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-pytorch` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - - CM_PREPROCESS_PYTORCH: `yes` - - MLPERF_TVM_TORCH_QUANTIZED_ENGINE: `qnnpack` - * `_tvm-tflite` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-tflite` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - -
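
Framework variations select the backend; a short sketch pairing the PyTorch backend with the CUDA device variation from the group above:

```bash
# Sets CM_MLPERF_BACKEND=pytorch and CM_MLPERF_DEVICE=gpu per the tables above
cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref,_pytorch,_cuda --quiet
```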
- - - * Group "**implementation**" -
- Click here to expand this section. - - * **`_python`** (default) - - ENV variables: - - CM_MLPERF_PYTHON: `yes` - - CM_MLPERF_IMPLEMENTATION: `reference` - -
- - - * Group "**models**" -
- Click here to expand this section. - - * `_3d-unet-99` - - ENV variables: - - CM_MODEL: `3d-unet-99` - * `_3d-unet-99.9` - - ENV variables: - - CM_MODEL: `3d-unet-99.9` - * `_bert-99` - - ENV variables: - - CM_MODEL: `bert-99` - * `_bert-99.9` - - ENV variables: - - CM_MODEL: `bert-99.9` - * `_dlrm-99` - - ENV variables: - - CM_MODEL: `dlrm-99` - * `_dlrm-99.9` - - ENV variables: - - CM_MODEL: `dlrm-99.9` - * `_gptj-99` - - ENV variables: - - CM_MODEL: `gptj-99` - * `_gptj-99.9` - - ENV variables: - - CM_MODEL: `gptj-99.9` - * `_llama2-70b-99` - - ENV variables: - - CM_MODEL: `llama2-70b-99` - * `_llama2-70b-99.9` - - ENV variables: - - CM_MODEL: `llama2-70b-99.9` - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - - CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: `yes` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - - CM_MLPERF_USE_MLCOMMONS_RUN_SCRIPT: `yes` - - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `1` - * `_rnnt` - - ENV variables: - - CM_MODEL: `rnnt` - - CM_MLPERF_MODEL_SKIP_BATCHING: `True` - - CM_TMP_IGNORE_MLPERF_QUERY_COUNT: `True` - * `_sdxl` - - ENV variables: - - CM_MODEL: `stable-diffusion-xl` - - CM_NUM_THREADS: `1` - -
- - - * Group "**network**" -
- Click here to expand this section. - - * `_network-lon` - - ENV variables: - - CM_NETWORK_LOADGEN: `lon` - - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `network_loadgen` - * `_network-sut` - - ENV variables: - - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX1: `network_sut` - - CM_NETWORK_LOADGEN: `sut` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * `_bfloat16` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `False` - - CM_MLPERF_MODEL_PRECISION: `bfloat16` - * `_float16` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `False` - - CM_MLPERF_MODEL_PRECISION: `float16` - * **`_fp32`** (default) - - ENV variables: - - CM_MLPERF_QUANTIZATION: `False` - - CM_MLPERF_MODEL_PRECISION: `float32` - * `_int8` - - Aliases: `_quantized` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `True` - - CM_MLPERF_MODEL_PRECISION: `int8` - -
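
As the "Default environment" section below notes, default keys can be overridden with `--env.KEY=VALUE`; an illustrative override of the test query count:

```bash
# CM_TEST_QUERY_COUNT defaults to 10 (see below); raise it for a longer test run
cm run script --tags=app,vision,language,mlcommons,mlperf,inference,reference,ref --env.CM_TEST_QUERY_COUNT=50 --quiet
```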
- - - ##### Default variations - - `_cpu,_fp32,_onnxruntime,_python,_resnet50` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--dataset=value` → `CM_MLPERF_VISION_DATASET_OPTION=value` - * `--dataset_args=value` → `CM_MLPERF_EXTRA_DATASET_ARGS=value` - * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--imagenet_path=value` → `IMAGENET_PATH=value` - * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--network=value` → `CM_NETWORK_LOADGEN=value` - * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` - * `--num_threads=value` → `CM_NUM_THREADS=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `OUTPUT_BASE_DIR=value` - * `--power=value` → `CM_MLPERF_POWER=value` - * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` - * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` - * `--rerun=value` → `CM_RERUN=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` - * `--threads=value` → `CM_NUM_THREADS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_MLPERF_LOADGEN_MODE: `accuracy` - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_OUTPUT_FOLDER_NAME: `test_results` - * CM_MLPERF_RUN_STYLE: `test` - * CM_TEST_QUERY_COUNT: `10` - * CM_MLPERF_QUANTIZATION: `False` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference` - * CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: `` - - - -___ -#### Script output -```bash -cmr "app vision language mlcommons mlperf inference reference ref [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md deleted file mode 100644 index aefdce10c..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/app-mlperf-inference/index.md +++ /dev/null @@ -1,488 +0,0 @@ -# app-mlperf-inference -Automatically generated README for this automation recipe: **app-mlperf-inference** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - -Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189), [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/README-extra.md) - - ---- - -This CM script provides a unified interface to prepare and run a modular version of the [MLPerf inference benchmark](https://arxiv.org/abs/1911.02549) -across diverse ML models, data sets, frameworks, libraries, run-time systems and platforms -using the [cross-platform automation meta-framework (MLCommons CM)](https://github.com/mlcommons/ck). - -It is assembled from reusable and interoperable [CM scripts for DevOps and MLOps](../list_of_scripts.md) -being developed by the [open MLCommons taskforce on automation and reproducibility](../mlperf-education-workgroup.md). - -It is a higher-level wrapper to several other CM scripts modularizing the MLPerf inference benchmark: -* [Reference Python implementation](../app-mlperf-inference-reference) -* [Universal C++ implementation](../app-mlperf-inference-cpp) -* [TFLite C++ implementation](../app-mlperf-inference-tflite-cpp) -* [NVidia optimized implementation](app-mlperf-inference-nvidia) - -See [this SCC'23 tutorial](https://github.com/mlcommons/ck/blob/master/docs/tutorials/sc22-scc-mlperf.md) -to use this script to run a reference (unoptimized) Python implementation of the MLPerf object detection benchmark -with RetinaNet model, Open Images dataset, ONNX runtime and CPU target. - -See this [CM script](../run-mlperf-inference-app) to automate and validate your MLPerf inference submission. - -Get in touch with the [open taskforce on automation and reproducibility at MLCommons](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md) -if you need help with your submission or if you would like to participate in further modularization of MLPerf -and collaborative design space exploration and optimization of ML Systems. - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/_cm.yaml)* -* Output cached? 
*False*
-
----
-### Reuse this script in your project

-#### Install MLCommons CM automation meta-framework

-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)

-#### Pull CM repository with this automation recipe (CM script)

-```cm pull repo mlcommons@cm4mlops```

-#### Print CM help from the command line

-````cmr "app vision language mlcommons mlperf inference generic" --help````

-#### Run this script

-=== "CLI"
- ##### Run this script via CLI

- ```bash
- cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic[,variations] [--input_flags]
- ```
-=== "CLI Alt"
- ##### Run this script via CLI (alternative)

- ```bash
- cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags]
- ```

-=== "Python"
- ##### Run this script from Python

- ```python
- import cmind

- r = cmind.access({'action':'run',
-                   'automation':'script',
-                   'tags':'app,vision,language,mlcommons,mlperf,inference,generic',
-                   'out':'con',
-                   ...
-                   (other input keys for this script)
-                   ...
-                  })

- if r['return']>0:
-     print (r['error'])
- ```

-=== "Docker"
- ##### Run this script via Docker (beta)

- ```bash
- cm docker script "app vision language mlcommons mlperf inference generic[variations]" [--input_flags]
- ```
-___

-=== "Variations"

- #### Variations

- * Group "**implementation**"
-
- Click here to expand this section. - - * `_cpp` - - Aliases: `_mil,_mlcommons-cpp` - - ENV variables: - - CM_MLPERF_CPP: `yes` - - CM_MLPERF_IMPLEMENTATION: `mlcommons_cpp` - - CM_IMAGENET_ACCURACY_DTYPE: `float32` - - CM_OPENIMAGES_ACCURACY_DTYPE: `float32` - * `_intel-original` - - Aliases: `_intel` - - ENV variables: - - CM_MLPERF_IMPLEMENTATION: `intel` - * `_kilt` - - Aliases: `_qualcomm` - - ENV variables: - - CM_MLPERF_IMPLEMENTATION: `qualcomm` - * `_nvidia-original` - - Aliases: `_nvidia` - - ENV variables: - - CM_MLPERF_IMPLEMENTATION: `nvidia` - - CM_SQUAD_ACCURACY_DTYPE: `float16` - - CM_IMAGENET_ACCURACY_DTYPE: `int32` - - CM_CNNDM_ACCURACY_DTYPE: `int32` - - CM_LIBRISPEECH_ACCURACY_DTYPE: `int8` - * **`_reference`** (default) - - Aliases: `_mlcommons-python,_python` - - ENV variables: - - CM_MLPERF_PYTHON: `yes` - - CM_MLPERF_IMPLEMENTATION: `mlcommons_python` - - CM_SQUAD_ACCURACY_DTYPE: `float32` - - CM_IMAGENET_ACCURACY_DTYPE: `float32` - - CM_OPENIMAGES_ACCURACY_DTYPE: `float32` - - CM_LIBRISPEECH_ACCURACY_DTYPE: `float32` - - CM_CNNDM_ACCURACY_DTYPE: `int32` - * `_tflite-cpp` - - Aliases: `_ctuning-cpp-tflite` - - ENV variables: - - CM_MLPERF_TFLITE_CPP: `yes` - - CM_MLPERF_CPP: `yes` - - CM_MLPERF_IMPLEMENTATION: `ctuning_cpp_tflite` - - CM_IMAGENET_ACCURACY_DTYPE: `float32` - -
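
A hedged sketch selecting the NVIDIA implementation together with matching backend and device variations from the groups that follow:

```bash
# NVIDIA implementation + TensorRT backend + CUDA device (illustrative combination)
cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic,_nvidia-original,_tensorrt,_cuda --quiet
```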
- - - * Group "**backend**" -
- Click here to expand this section. - - * `_deepsparse` - - ENV variables: - - CM_MLPERF_BACKEND: `deepsparse` - * `_glow` - - ENV variables: - - CM_MLPERF_BACKEND: `glow` - * `_ncnn` - - ENV variables: - - CM_MLPERF_BACKEND: `ncnn` - * `_onnxruntime` - - ENV variables: - - CM_MLPERF_BACKEND: `onnxruntime` - * `_pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - * `_ray` - - ENV variables: - - CM_MLPERF_BACKEND: `ray` - * `_tensorrt` - - ENV variables: - - CM_MLPERF_BACKEND: `tensorrt` - * `_tf` - - ENV variables: - - CM_MLPERF_BACKEND: `tf` - * `_tflite` - - ENV variables: - - CM_MLPERF_BACKEND: `tflite` - * `_tvm-onnx` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-onnx` - * `_tvm-pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-pytorch` - * `_tvm-tflite` - - ENV variables: - - CM_MLPERF_BACKEND: `tvm-tflite` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cpu`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cpu` - * `_cuda` - - ENV variables: - - CM_MLPERF_DEVICE: `gpu` - * `_qaic` - - ENV variables: - - CM_MLPERF_DEVICE: `qaic` - * `_rocm` - - ENV variables: - - CM_MLPERF_DEVICE: `rocm` - * `_tpu` - - ENV variables: - - CM_MLPERF_DEVICE: `tpu` - -
- - - * Group "**model**" -
- Click here to expand this section. - - * `_3d-unet-99` - - ENV variables: - - CM_MODEL: `3d-unet-99` - * `_3d-unet-99.9` - - ENV variables: - - CM_MODEL: `3d-unet-99.9` - * `_bert-99` - - ENV variables: - - CM_MODEL: `bert-99` - * `_bert-99.9` - - ENV variables: - - CM_MODEL: `bert-99.9` - * `_dlrm-v2-99` - - ENV variables: - - CM_MODEL: `dlrm-v2-99` - * `_dlrm-v2-99.9` - - ENV variables: - - CM_MODEL: `dlrm-v2-99.9` - * `_efficientnet` - - ENV variables: - - CM_MODEL: `efficientnet` - * `_gptj-99` - - ENV variables: - - CM_MODEL: `gptj-99` - * `_gptj-99.9` - - ENV variables: - - CM_MODEL: `gptj-99.9` - * `_llama2-70b-99` - - ENV variables: - - CM_MODEL: `llama2-70b-99` - * `_llama2-70b-99.9` - - ENV variables: - - CM_MODEL: `llama2-70b-99.9` - * `_mobilenet` - - ENV variables: - - CM_MODEL: `mobilenet` - * **`_resnet50`** (default) - - ENV variables: - - CM_MODEL: `resnet50` - * `_retinanet` - - ENV variables: - - CM_MODEL: `retinanet` - * `_rnnt` - - ENV variables: - - CM_MODEL: `rnnt` - * `_sdxl` - - ENV variables: - - CM_MODEL: `stable-diffusion-xl` - -
- - - * Group "**precision**" -
- Click here to expand this section. - - * `_bfloat16` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `False` - - CM_MLPERF_MODEL_PRECISION: `float32` - * `_float16` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `False` - - CM_MLPERF_MODEL_PRECISION: `float32` - * **`_float32`** (default) - - Aliases: `_fp32` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `False` - - CM_MLPERF_MODEL_PRECISION: `float32` - * `_int4` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `True` - - CM_MLPERF_MODEL_PRECISION: `int4` - * `_int8` - - Aliases: `_quantized` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `True` - - CM_MLPERF_MODEL_PRECISION: `int8` - * `_uint8` - - ENV variables: - - CM_MLPERF_QUANTIZATION: `True` - - CM_MLPERF_MODEL_PRECISION: `uint8` - -
- - - * Group "**execution-mode**" -
- Click here to expand this section. - - * `_fast` - - ENV variables: - - CM_FAST_FACTOR: `5` - - CM_OUTPUT_FOLDER_NAME: `fast_results` - - CM_MLPERF_RUN_STYLE: `fast` - * **`_test`** (default) - - ENV variables: - - CM_OUTPUT_FOLDER_NAME: `test_results` - - CM_MLPERF_RUN_STYLE: `test` - * `_valid` - - ENV variables: - - CM_OUTPUT_FOLDER_NAME: `valid_results` - - CM_MLPERF_RUN_STYLE: `valid` - -
- - - * Group "**reproducibility**" -
- Click here to expand this section. - - * `_r2.1_default` - - ENV variables: - - CM_SKIP_SYS_UTILS: `yes` - - CM_TEST_QUERY_COUNT: `100` - * `_r3.0_default` - - ENV variables: - - CM_SKIP_SYS_UTILS: `yes` - * `_r3.1_default` - * `_r4.0_default` - - ENV variables: - - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: `/opt/nvmitten-0.1.3-cp38-cp38-linux_x86_64.whl` - * `_r4.1_default` - - ENV variables: - - CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: `/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl` - -
- - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_power` - - ENV variables: - - CM_MLPERF_POWER: `yes` - - CM_SYSTEM_POWER: `yes` - -
- - - * Group "**batch_size**" -
- Click here to expand this section. - - * `_batch_size.#` - - ENV variables: - - CM_MLPERF_LOADGEN_MAX_BATCHSIZE: `#` - -
- - - * Group "**loadgen-scenario**" -
- Click here to expand this section. - - * `_multistream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream` - * **`_offline`** (default) - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * `_server` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `Server` - * `_singlestream` - - ENV variables: - - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream` - -
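
Putting the groups together, a sketch of a short performance run using input flags documented below (`--mode` and `--test_query_count` both appear in the flag mapping):

```bash
# Reference ResNet50 on CPU, Offline scenario, quick performance test
cm run script --tags=app,vision,language,mlcommons,mlperf,inference,generic,_reference,_resnet50,_cpu,_offline,_test --mode=performance --test_query_count=100 --quiet
```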
- - - ##### Default variations - - `_cpu,_float32,_offline,_reference,_resnet50,_test` -=== "Input Flags" - - - #### Input Flags - - * --**scenario:** MLPerf inference scenario {Offline,Server,SingleStream,MultiStream} (*Offline*) - * --**mode:** MLPerf inference mode {performance,accuracy} (*accuracy*) - * --**test_query_count:** Specifies the number of samples to be processed during a test run - * --**target_qps:** Target QPS - * --**target_latency:** Target Latency - * --**max_batchsize:** Maximum batchsize to be used - * --**num_threads:** Number of CPU threads to launch the application with - * --**hw_name:** Valid value - any system description which has a config file (under same name) defined [here](https://github.com/mlcommons/cm4mlops/tree/main/script/get-configs-sut-mlperf-inference/configs) - * --**output_dir:** Location where the outputs are produced - * --**rerun:** Redo the run even if previous run files exist (*True*) - * --**regenerate_files:** Regenerates measurement files including accuracy.txt files even if a previous run exists. This option is redundant if `--rerun` is used - * --**adr.python.name:** Python virtual environment name (optional) (*mlperf*) - * --**adr.python.version_min:** Minimal Python version (*3.8*) - * --**adr.python.version:** Force Python version (must have all system deps) - * --**adr.compiler.tags:** Compiler for loadgen (*gcc*) - * --**adr.inference-src-loadgen.env.CM_GIT_URL:** Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations) - * --**adr.inference-src.env.CM_GIT_URL:** Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations) - * --**quiet:** Quiet run (select default values for all questions) (*False*) - * --**readme:** Generate README with the reproducibility report - * --**debug:** Debug MLPerf script -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value` - * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` - * `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--imagenet_path=value` → `IMAGENET_PATH=value` - * `--max_amps=value` → `CM_MLPERF_POWER_MAX_AMPS=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--max_volts=value` → `CM_MLPERF_POWER_MAX_VOLTS=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--ntp_server=value` → `CM_MLPERF_POWER_NTP_SERVER=value` - * `--num_threads=value` → `CM_NUM_THREADS=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `OUTPUT_BASE_DIR=value` - * `--power=value` → `CM_MLPERF_POWER=value` - * `--power_server=value` → `CM_MLPERF_POWER_SERVER_ADDRESS=value` - * `--readme=value` → `CM_MLPERF_README=value` - * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` - * `--rerun=value` → `CM_RERUN=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * 
`--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_LOADGEN_MODE: `accuracy` - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_OUTPUT_FOLDER_NAME: `test_results` - * CM_MLPERF_RUN_STYLE: `test` - * CM_TEST_QUERY_COUNT: `10` - * CM_MLPERF_QUANTIZATION: `False` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "app vision language mlcommons mlperf inference generic [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md deleted file mode 100644 index 482ef2c01..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/benchmark-program-mlperf/index.md +++ /dev/null @@ -1,100 +0,0 @@ -# benchmark-program-mlperf -Automatically generated README for this automation recipe: **benchmark-program-mlperf** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/benchmark-program-mlperf/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "mlperf benchmark-mlperf" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=mlperf,benchmark-mlperf[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "mlperf benchmark-mlperf [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'mlperf,benchmark-mlperf' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "mlperf benchmark-mlperf[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**power-mode**" -
- Click here to expand this section. - - * **`_no-power`** (default) - * `_power` - - ENV variables: - - CM_MLPERF_POWER: `yes` - -
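
A minimal sketch of enabling the power-measurement variation for this helper script (it is normally invoked by the higher-level MLPerf pipeline rather than directly):

```bash
# _power sets CM_MLPERF_POWER=yes; the default variation is _no-power
cmr "mlperf benchmark-mlperf _power"
```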
- - - ##### Default variations - - `_no-power` - -___ -#### Script output -```bash -cmr "mlperf benchmark-mlperf [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md deleted file mode 100644 index 01e67ecc5..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md +++ /dev/null @@ -1,7 +0,0 @@ -* [app-loadgen-generic-python](app-loadgen-generic-python/index.md) -* [app-mlperf-inference](app-mlperf-inference/index.md) -* [app-mlperf-inference-ctuning-cpp-tflite](app-mlperf-inference-ctuning-cpp-tflite/index.md) -* [app-mlperf-inference-mlcommons-cpp](app-mlperf-inference-mlcommons-cpp/index.md) -* [app-mlperf-inference-mlcommons-python](app-mlperf-inference-mlcommons-python/index.md) -* [benchmark-program-mlperf](benchmark-program-mlperf/index.md) -* [run-mlperf-inference-app](run-mlperf-inference-app/index.md) diff --git a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md b/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md deleted file mode 100644 index c05c90c38..000000000 --- a/docs/scripts/Modular-MLPerf-inference-benchmark-pipeline/run-mlperf-inference-app/index.md +++ /dev/null @@ -1,326 +0,0 @@ -# run-mlperf-inference-app -Automatically generated README for this automation recipe: **run-mlperf-inference-app** - -Category: **[Modular MLPerf inference benchmark pipeline](..)** - -License: **Apache 2.0** - -Developers: [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh), [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/run-mlperf-inference-app/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run-mlperf,inference" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run-mlperf,inference[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run-mlperf,inference [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run-mlperf,inference' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run-mlperf,inference[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_all-scenarios` - - ENV variables: - - CM_MLPERF_LOADGEN_ALL_SCENARIOS: `yes` - * `_compliance` - - ENV variables: - - CM_MLPERF_LOADGEN_COMPLIANCE: `yes` - * `_dashboard` - - ENV variables: - - CM_MLPERF_DASHBOARD: `on` - -
- - - * Group "**benchmark-version**" -
- Click here to expand this section. - - * `_r2.1` - - ENV variables: - - CM_MLPERF_INFERENCE_VERSION: `2.1` - - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r2.1_default` - * `_r3.0` - - ENV variables: - - CM_MLPERF_INFERENCE_VERSION: `3.0` - - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r3.0_default` - * `_r3.1` - - ENV variables: - - CM_MLPERF_INFERENCE_VERSION: `3.1` - - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r3.1_default` - * `_r4.0` - - ENV variables: - - CM_MLPERF_INFERENCE_VERSION: `4.0` - - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r4.0_default` - * `_r4.1` - - ENV variables: - - CM_MLPERF_INFERENCE_VERSION: `4.1` - - CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: `r4.1_default` - -
- - - * Group "**mode**" -
- Click here to expand this section. - - * `_all-modes` - - ENV variables: - - CM_MLPERF_LOADGEN_ALL_MODES: `yes` - -
- - - * Group "**submission-generation**" -
- Click here to expand this section. - - * `_accuracy-only` - - ENV variables: - - CM_MLPERF_LOADGEN_MODE: `accuracy` - - CM_MLPERF_SUBMISSION_RUN: `yes` - - CM_RUN_MLPERF_ACCURACY: `on` - - CM_RUN_SUBMISSION_CHECKER: `no` - * `_find-performance` - - ENV variables: - - CM_MLPERF_FIND_PERFORMANCE_MODE: `yes` - - CM_MLPERF_LOADGEN_ALL_MODES: `no` - - CM_MLPERF_LOADGEN_MODE: `performance` - - CM_MLPERF_RESULT_PUSH_TO_GITHUB: `False` - * **`_performance-and-accuracy`** (default) - * `_performance-only` - - ENV variables: - - CM_MLPERF_LOADGEN_MODE: `performance` - - CM_MLPERF_SUBMISSION_RUN: `yes` - - CM_RUN_SUBMISSION_CHECKER: `no` - * `_populate-readme` - - ENV variables: - - CM_MLPERF_README: `yes` - - CM_MLPERF_SUBMISSION_RUN: `yes` - - CM_RUN_SUBMISSION_CHECKER: `no` - * `_submission` - - ENV variables: - - CM_MLPERF_LOADGEN_COMPLIANCE: `yes` - - CM_MLPERF_SUBMISSION_RUN: `yes` - - CM_RUN_MLPERF_ACCURACY: `on` - - CM_RUN_SUBMISSION_CHECKER: `yes` - - CM_TAR_SUBMISSION_DIR: `yes` - -
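
One common pattern (an assumption about typical usage, not stated in this README) is to run `_find-performance` first to discover an achievable QPS and then `_submission` to generate the submission tree with compliance checks:

```bash
# Step 1: performance-only search run (CM_MLPERF_FIND_PERFORMANCE_MODE=yes)
cm run script --tags=run-mlperf,inference,_find-performance --quiet
# Step 2: full submission run (compliance tests, submission checker, tarred tree)
cm run script --tags=run-mlperf,inference,_submission --quiet
```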
- - - * Group "**submission-generation-style**" -
- Click here to expand this section. - - * `_full` - - ENV variables: - - CM_MLPERF_SUBMISSION_GENERATION_STYLE: `full` - - CM_MLPERF_SKIP_SUBMISSION_GENERATION: `yes` - * **`_short`** (default) - - ENV variables: - - CM_MLPERF_SUBMISSION_GENERATION_STYLE: `short` - -
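For instance, pinning a benchmark version while requesting a full submission-generation style could look like this sketch (model and division values are illustrative):

```bash
cmr "run-mlperf,inference _r4.1,_submission,_full" --model=bert-99 --division=closed
```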
- - - ##### Default variations - - `_performance-and-accuracy,_short` -=== "Input Flags" - - - #### Input Flags - - * --**division:** MLPerf division {open,closed} (*open*) - * --**category:** MLPerf category {edge,datacenter,network} (*edge*) - * --**device:** MLPerf device {cpu,cuda,rocm,qaic} (*cpu*) - * --**model:** MLPerf model {resnet50,retinanet,bert-99,bert-99.9,3d-unet-99,3d-unet-99.9,rnnt,dlrm-v2-99,dlrm-v2-99.9,gptj-99,gptj-99.9,sdxl,llama2-70b-99,llama2-70b-99.9,mobilenet,efficientnet} (*resnet50*) - * --**precision:** MLPerf model precision {float32,float16,bfloat16,int8,uint8} - * --**implementation:** MLPerf implementation {mlcommons-python,mlcommons-cpp,nvidia,intel,qualcomm,ctuning-cpp-tflite} (*mlcommons-python*) - * --**backend:** MLPerf framework (backend) {onnxruntime,tf,pytorch,deepsparse,tensorrt,glow,tvm-onnx} (*onnxruntime*) - * --**scenario:** MLPerf scenario {Offline,Server,SingleStream,MultiStream} (*Offline*) - * --**mode:** MLPerf benchmark mode {,accuracy,performance} - * --**execution_mode:** MLPerf execution mode {test,fast,valid} (*test*) - * --**sut:** SUT configuration (if known) - * --**submitter:** Submitter name (without space) (*CTuning*) - * --**results_dir:** Folder path to store results (defaults to the current working directory) - * --**submission_dir:** Folder path to store MLPerf submission tree - * --**adr.compiler.tags:** Compiler for loadgen and any C/C++ part of implementation - * --**adr.inference-src-loadgen.env.CM_GIT_URL:** Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations) - * --**adr.inference-src.env.CM_GIT_URL:** Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations) - * --**adr.mlperf-inference-implementation.max_batchsize:** Maximum batchsize to be used - * --**adr.mlperf-inference-implementation.num_threads:** Number of threads (reference & C++ implementation only) - * --**adr.python.name:** Python virtual environment name (optional) - * --**adr.python.version:** Force Python version (must have all system deps) - * --**adr.python.version_min:** Minimal Python version (*3.8*) - * --**power:** Measure power {yes,no} (*no*) - * --**adr.mlperf-power-client.power_server:** MLPerf Power server IP address (*192.168.0.15*) - * --**adr.mlperf-power-client.port:** MLPerf Power server port (*4950*) - * --**clean:** Clean run (*False*) - * --**compliance:** Whether to run compliance tests (applicable only for closed division) {yes,no} (*no*) - * --**dashboard_wb_project:** W&B dashboard project (*cm-mlperf-dse-testing*) - * --**dashboard_wb_user:** W&B dashboard user (*cmind*) - * --**hw_name:** MLPerf hardware name (for example "gcp.c3_standard_8", "nvidia_orin", "lenovo_p14s_gen_4_windows_11", "macbook_pro_m1_2", "thundercomm_rb6" ...) 
- * --**multistream_target_latency:** Set MultiStream target latency - * --**offline_target_qps:** Set LoadGen Offline target QPS - * --**quiet:** Quiet run (select default values for all questions) (*True*) - * --**server_target_qps:** Set Server target QPS - * --**singlestream_target_latency:** Set SingleStream target latency - * --**target_latency:** Set Target latency - * --**target_qps:** Set LoadGen target QPS - * --**j:** Print results dictionary to console at the end of the run (*False*) - * --**repro:** Record input/output/state/info files to make it easier to reproduce results (*False*) - * --**time:** Print script execution time at the end of the run (*True*) - * --**debug:** Debug this script (*False*) -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--backend=value` → `CM_MLPERF_BACKEND=value` - * `--batch_size=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--beam_size=value` → `GPTJ_BEAM_SIZE=value` - * `--category=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value` - * `--clean=value` → `CM_MLPERF_CLEAN_ALL=value` - * `--compliance=value` → `CM_MLPERF_LOADGEN_COMPLIANCE=value` - * `--dashboard_wb_project=value` → `CM_MLPERF_DASHBOARD_WANDB_PROJECT=value` - * `--dashboard_wb_user=value` → `CM_MLPERF_DASHBOARD_WANDB_USER=value` - * `--debug=value` → `CM_DEBUG_SCRIPT_BENCHMARK_PROGRAM=value` - * `--device=value` → `CM_MLPERF_DEVICE=value` - * `--division=value` → `CM_MLPERF_SUBMISSION_DIVISION=value` - * `--docker=value` → `CM_MLPERF_USE_DOCKER=value` - * `--dump_version_info=value` → `CM_DUMP_VERSION_INFO=value` - * `--execution_mode=value` → `CM_MLPERF_RUN_STYLE=value` - * `--find_performance=value` → `CM_MLPERF_FIND_PERFORMANCE_MODE=value` - * `--gpu_name=value` → `CM_NVIDIA_GPU_NAME=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--hw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` - * `--imagenet_path=value` → `IMAGENET_PATH=value` - * `--implementation=value` → `CM_MLPERF_IMPLEMENTATION=value` - * `--lang=value` → `CM_MLPERF_IMPLEMENTATION=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--model=value` → `CM_MLPERF_MODEL=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--network=value` → `CM_NETWORK_LOADGEN=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `OUTPUT_BASE_DIR=value` - * `--output_summary=value` → `MLPERF_INFERENCE_SUBMISSION_SUMMARY=value` - * `--output_tar=value` → `MLPERF_INFERENCE_SUBMISSION_TAR_FILE=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--power=value` → `CM_SYSTEM_POWER=value` - * `--precision=value` → `CM_MLPERF_MODEL_PRECISION=value` - * `--preprocess_submission=value` → `CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=value` - * `--push_to_github=value` → `CM_MLPERF_RESULT_PUSH_TO_GITHUB=value` - * `--readme=value` → `CM_MLPERF_README=value` - * `--regenerate_accuracy_file=value` → `CM_MLPERF_REGENERATE_ACCURACY_FILE=value` - * `--regenerate_files=value` → `CM_REGENERATE_MEASURE_FILES=value` - * `--rerun=value` → `CM_RERUN=value` - * `--results_dir=value` → `OUTPUT_BASE_DIR=value` - * `--results_git_url=value` → `CM_MLPERF_RESULTS_GIT_REPO_URL=value` - * `--run_checker=value` → `CM_RUN_SUBMISSION_CHECKER=value` - * `--run_style=value` → `CM_MLPERF_RUN_STYLE=value` - * `--save_console_log=value` → `CM_SAVE_CONSOLE_LOG=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * 
`--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--skip_submission_generation=value` → `CM_MLPERF_SKIP_SUBMISSION_GENERATION=value` - * `--skip_truncation=value` → `CM_SKIP_TRUNCATE_ACCURACY=value` - * `--submission_dir=value` → `CM_MLPERF_INFERENCE_SUBMISSION_DIR=value` - * `--submitter=value` → `CM_MLPERF_SUBMITTER=value` - * `--sut=value` → `CM_MLPERF_INFERENCE_SUT_VARIATION=value` - * `--sut_servers=value` → `CM_NETWORK_LOADGEN_SUT_SERVERS=value` - * `--sw_notes_extra=value` → `CM_MLPERF_SUT_SW_NOTES_EXTRA=value` - * `--system_type=value` → `CM_MLPERF_SUBMISSION_SYSTEM_TYPE=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--test_query_count=value` → `CM_TEST_QUERY_COUNT=value` - * `--threads=value` → `CM_NUM_THREADS=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_IMPLEMENTATION: `reference` - * CM_MLPERF_MODEL: `resnet50` - * CM_MLPERF_RUN_STYLE: `test` - - -#### Versions -* `master` -* `r2.1` - -___ -#### Script output -```bash -cmr "run-mlperf,inference [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md b/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md deleted file mode 100644 index 676d85570..000000000 --- a/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-nvidia/index.md +++ /dev/null @@ -1,165 +0,0 @@ -# app-mlperf-training-nvidia -Automatically generated README for this automation recipe: **app-mlperf-training-nvidia** - -Category: **[Modular MLPerf training benchmark pipeline](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app vision language mlcommons mlperf training nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,vision,language,mlcommons,mlperf,training,nvidia[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,vision,language,mlcommons,mlperf,training,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app vision language mlcommons mlperf training nvidia[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_bert` - - ENV variables: - - CM_MLPERF_MODEL: `bert` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cuda`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cuda` - - USE_CUDA: `True` - * `_tpu` - - ENV variables: - - CM_MLPERF_DEVICE: `tpu` - - CUDA_VISIBLE_DEVICES: `` - - USE_CUDA: `False` - -
- - - * Group "**framework**" -
- Click here to expand this section. - - * `_pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_tf` - - Aliases: `_tensorflow` - - ENV variables: - - CM_MLPERF_BACKEND: `tf` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - -
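As a sketch, selecting the BERT model with the PyTorch backend on the default CUDA device (the `--hw_name` value is a placeholder):

```bash
cmr "app vision language mlcommons mlperf training nvidia _bert,_pytorch,_cuda" --hw_name=my_system
```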
- - - ##### Default variations - - `_cuda` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` - * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` - * `--num_threads=value` → `CM_NUM_THREADS=value` - * `--output_dir=value` → `OUTPUT_BASE_DIR=value` - * `--rerun=value` → `CM_RERUN=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia` - - - -#### Native script being run -=== "Linux/macOS" - * [run-bert-training.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/run-bert-training.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-nvidia/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "app vision language mlcommons mlperf training nvidia [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md b/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md deleted file mode 100644 index 4adad297f..000000000 --- a/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/app-mlperf-training-reference/index.md +++ /dev/null @@ -1,166 +0,0 @@ -# app-mlperf-training-reference -Automatically generated README for this automation recipe: **app-mlperf-training-reference** - -Category: **[Modular MLPerf training benchmark pipeline](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app vision language mlcommons mlperf training reference ref" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,vision,language,mlcommons,mlperf,training,reference,ref[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,vision,language,mlcommons,mlperf,training,reference,ref' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app vision language mlcommons mlperf training reference ref[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_bert` - - ENV variables: - - CM_MLPERF_MODEL: `bert` - -
- - - * Group "**device**" -
- Click here to expand this section. - - * **`_cuda`** (default) - - ENV variables: - - CM_MLPERF_DEVICE: `cuda` - - USE_CUDA: `True` - * `_tpu` - - ENV variables: - - CM_MLPERF_DEVICE: `tpu` - - CUDA_VISIBLE_DEVICES: `` - - USE_CUDA: `False` - -
- - - * Group "**framework**" -
- Click here to expand this section. - - * `_pytorch` - - ENV variables: - - CM_MLPERF_BACKEND: `pytorch` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - * `_tf` - - Aliases: `_tensorflow` - - ENV variables: - - CM_MLPERF_BACKEND: `tf` - - CM_MLPERF_BACKEND_VERSION: `<<>>` - -
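As a sketch, running the reference BERT training with the TensorFlow backend (the output path is a placeholder):

```bash
cmr "app vision language mlcommons mlperf training reference ref _bert,_tf" --output_dir=$HOME/training_results
```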
- - - ##### Default variations - - `_cuda` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--clean=value` → `CM_MLPERF_CLEAN_SUBMISSION_DIR=value` - * `--docker=value` → `CM_RUN_DOCKER_CONTAINER=value` - * `--hw_name=value` → `CM_HW_NAME=value` - * `--model=value` → `CM_MLPERF_CUSTOM_MODEL_PATH=value` - * `--num_threads=value` → `CM_NUM_THREADS=value` - * `--output_dir=value` → `OUTPUT_BASE_DIR=value` - * `--rerun=value` → `CM_RERUN=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `reference` - * CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX: `` - - - -#### Native script being run -=== "Linux/macOS" - * [run-bert-training.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/run-bert-training.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-training-reference/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "app vision language mlcommons mlperf training reference ref [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md b/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md deleted file mode 100644 index 73140884b..000000000 --- a/docs/scripts/Modular-MLPerf-training-benchmark-pipeline/index.md +++ /dev/null @@ -1,2 +0,0 @@ -* [app-mlperf-training-nvidia](app-mlperf-training-nvidia/index.md) -* [app-mlperf-training-reference](app-mlperf-training-reference/index.md) diff --git a/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md b/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md deleted file mode 100644 index 1d71d7f6f..000000000 --- a/docs/scripts/Modular-application-pipeline/app-image-corner-detection/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# app-image-corner-detection -Automatically generated README for this automation recipe: **app-image-corner-detection** - -Category: **[Modular application pipeline](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app image corner-detection" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,image,corner-detection - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app image corner-detection " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,image,corner-detection' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app image corner-detection" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-image-corner-detection/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "app image corner-detection " -j -``` \ No newline at end of file diff --git a/docs/scripts/Modular-application-pipeline/index.md b/docs/scripts/Modular-application-pipeline/index.md deleted file mode 100644 index 96076be6f..000000000 --- a/docs/scripts/Modular-application-pipeline/index.md +++ /dev/null @@ -1 +0,0 @@ -* [app-image-corner-detection](app-image-corner-detection/index.md) diff --git a/docs/scripts/Platform-information/detect-cpu/index.md b/docs/scripts/Platform-information/detect-cpu/index.md deleted file mode 100644 index a45f8a03f..000000000 --- a/docs/scripts/Platform-information/detect-cpu/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# detect-cpu -Automatically generated README for this automation recipe: **detect-cpu** - -Category: **[Platform information](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "detect cpu detect-cpu info" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=detect,cpu,detect-cpu,info - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "detect cpu detect-cpu info " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'detect,cpu,detect-cpu,info' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "detect cpu detect-cpu info" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-cpu/run.bat) -___ -#### Script output -```bash -cmr "detect cpu detect-cpu info " -j -``` \ No newline at end of file diff --git a/docs/scripts/Platform-information/detect-os/index.md b/docs/scripts/Platform-information/detect-os/index.md deleted file mode 100644 index 92e04fa5e..000000000 --- a/docs/scripts/Platform-information/detect-os/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# detect-os -Automatically generated README for this automation recipe: **detect-os** - -Category: **[Platform information](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "detect-os detect os info" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=detect-os,detect,os,info - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "detect-os detect os info " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'detect-os,detect,os,info' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "detect-os detect os info" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/detect-os/run.bat) -___ -#### Script output -```bash -cmr "detect-os detect os info " -j -``` \ No newline at end of file diff --git a/docs/scripts/Platform-information/index.md b/docs/scripts/Platform-information/index.md deleted file mode 100644 index 7a25f8c34..000000000 --- a/docs/scripts/Platform-information/index.md +++ /dev/null @@ -1,2 +0,0 @@ -* [detect-cpu](detect-cpu/index.md) -* [detect-os](detect-os/index.md) diff --git a/docs/scripts/Python-automation/activate-python-venv/index.md b/docs/scripts/Python-automation/activate-python-venv/index.md deleted file mode 100644 index 90f6bb959..000000000 --- a/docs/scripts/Python-automation/activate-python-venv/index.md +++ /dev/null @@ -1,88 +0,0 @@ -# Activate virtual Python environment -Automatically generated README for this automation recipe: **activate-python-venv** - -Category: **[Python automation](..)** - -License: **Apache 2.0** - -Developers: [Grigori Fursin](https://cKnowledge.org/gfursin) -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "activate python-venv" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=activate,python-venv - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "activate python-venv " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'activate,python-venv' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "activate python-venv" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/activate-python-venv/run.bat) -___ -#### Script output -```bash -cmr "activate python-venv " -j -``` \ No newline at end of file diff --git a/docs/scripts/Python-automation/get-generic-python-lib/index.md b/docs/scripts/Python-automation/get-generic-python-lib/index.md deleted file mode 100644 index 0cf418eb2..000000000 --- a/docs/scripts/Python-automation/get-generic-python-lib/index.md +++ /dev/null @@ -1,421 +0,0 @@ -# get-generic-python-lib -Automatically generated README for this automation recipe: **get-generic-python-lib** - -Category: **[Python automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get generic-python-lib" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,generic-python-lib[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get generic-python-lib [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,generic-python-lib' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get generic-python-lib[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_Pillow` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `Pillow` - * `_anthropic` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `anthropic` - * `_apache-tvm` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `apache-tvm` - - CM_GENERIC_PYTHON_PIP_EXTRA: ` --pre` - * `_apex` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex` - * `_async_timeout` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `async_timeout` - * `_attr` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `attr` - * `_attrs` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `attrs` - * `_boto3` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `boto3` - * `_cloudpickle` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `cloudpickle` - * `_cmind` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `cmind` - * `_colored` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `colored` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://pypi.ngc.nvidia.com` - * `_conda.#` - * `_cupy` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `cupy` - * `_custom-python` - - ENV variables: - - CM_TMP_USE_CUSTOM_PYTHON: `on` - * `_datasets` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `datasets` - * `_decorator` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `decorator` - * `_deepsparse` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `deepsparse` - * `_dllogger` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `dllogger` - - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/NVIDIA/dllogger#egg=dllogger` - * `_fiftyone` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `fiftyone` - * `_google-api-python-client` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `google_api_python_client` - * `_google-auth-oauthlib` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `google_auth_oauthlib` - * `_huggingface_hub` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `huggingface_hub` - * `_inflect` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `inflect` - * `_jax` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `jax` - * `_jax_cuda` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `jax[cuda]` - - CM_GENERIC_PYTHON_PIP_EXTRA: `-f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html` - - CM_JAX_VERSION_EXTRA: `CUDA` - * `_librosa` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `librosa` - * `_matplotlib` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `matplotlib` - * `_mlperf_loadgen` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `mlperf_loadgen` - - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/mlcommons/inference.git#subdirectory=loadgen` - * `_mlperf_logging` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `mlperf_logging` - - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/mlperf/logging.git` - * `_mpld3` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `mpld3` - * `_nibabel` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `nibabel` - * `_numpy` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `numpy` - * `_nvidia-apex` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex` - - CM_GENERIC_PYTHON_PACKAGE_VARIANT: `nvidia-apex` - - CM_GENERIC_PYTHON_PIP_URL: `git+https://github.com/nvidia/apex@0da3ffb92ee6fbe5336602f0e3989db1cd16f880` - * `_nvidia-apex-from-src` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `apex` - - CM_GENERIC_PYTHON_PACKAGE_VARIANT: `nvidia-apex` - * 
`_nvidia-dali` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-dali-cuda120` - - CM_GENERIC_PYTHON_PIP_EXTRA: ` --upgrade --default-timeout=900` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://developer.download.nvidia.com/compute/redist` - * `_nvidia-pycocotools` - - ENV variables: - - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: `pycocotools` - - CM_GENERIC_PYTHON_PIP_URL: `pycocotools@git+https://github.com/NVIDIA/cocoapi#subdirectory=PythonAPI` - * `_nvidia-pyindex` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-pyindex` - * `_nvidia-tensorrt` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `nvidia-tensorrt` - * `_onnx` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnx` - * `_onnx-graphsurgeon` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnx_graphsurgeon` - * `_onnxruntime` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnxruntime` - * `_onnxruntime_gpu` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `onnxruntime_gpu` - - CM_ONNXRUNTIME_VERSION_EXTRA: `GPU` - * `_openai` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `openai` - * `_opencv-python` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `opencv-python` - * `_package.#` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `#` - - CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS: `` - - CM_GENERIC_PYTHON_PIP_URL: `` - * `_pandas` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `pandas` - * `_path.#` - - ENV variables: - - CM_GENERIC_PYTHON_PIP_URL: `#` - * `_pillow` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `Pillow` - * `_pip` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `pip` - * `_polygraphy` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `polygraphy` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://pypi.ngc.nvidia.com` - * `_pre` - - ENV variables: - - CM_GENERIC_PYTHON_DEV_VERSION: `yes` - * `_protobuf` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `protobuf` - * `_psutil` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `psutil` - * `_pycocotools` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `pycocotools` - * `_pycuda` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `pycuda` - * `_ray` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `ray[default]` - * `_requests` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `requests` - * `_rocm` - * `_safetensors` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `safetensors` - * `_scikit-learn` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `scikit-learn` - * `_scipy` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `scipy` - * `_scons` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `scons` - * `_setfit` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `setfit` - * `_setuptools` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `setuptools` - * `_six` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `six` - * `_sklearn` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `sklearn` - * `_sox` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `sox` - * `_sparsezoo` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `sparsezoo` - * `_streamlit` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `streamlit` - * `_streamlit_option_menu` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `streamlit_option_menu` - * `_tensorboard` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorboard` - * `_tensorflow` - - ENV variables: - - 
CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorflow` - * `_tensorrt` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tensorrt` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/<<>>` - - CM_TORCH_VERSION_EXTRA: `CUDA` - * `_tflite` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tflite` - * `_tflite-runtime` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tflite-runtime` - * `_tokenization` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tokenization` - * `_toml` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `toml` - * `_torch` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu` - * `_torch_cuda` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: `https://download.pytorch.org/whl/<<>>` - - CM_TORCH_VERSION_EXTRA: `CUDA` - * `_torch_tensorrt` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torch-tensorrt` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/<<>>` - - CM_TORCH_VERSION_EXTRA: `CUDA` - * `_torchaudio` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchaudio` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu` - * `_torchaudio_cuda` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchaudio` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL1: `https://download.pytorch.org/whl/<<>>` - - CM_TORCHAUDIO_VERSION_EXTRA: `CUDA` - * `_torchvision` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchvision` - - CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: `https://download.pytorch.org/whl/cpu` - * `_torchvision_cuda` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `torchvision` - - CM_TORCHVISION_VERSION_EXTRA: `CUDA` - * `_tornado` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tornado` - * `_tqdm` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `tqdm` - * `_transformers` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `transformers` - * `_typing_extensions` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `typing_extensions` - * `_ujson` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `ujson` - * `_unidecode` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `unidecode` - * `_url.#` - - ENV variables: - - CM_GENERIC_PYTHON_PIP_URL: `#` - - CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL: `yes` - * `_wandb` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `wandb` - * `_west` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `west` - * `_xgboost` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `xgboost` - * `_xlsxwriter` - - ENV variables: - - CM_GENERIC_PYTHON_PACKAGE_NAME: `xlsxwriter` - -
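For example (illustrative; the `_package.#` wildcard accepts an arbitrary PyPI package name in place of `#`):

```bash
# Install a predefined variation
cmr "get generic-python-lib _onnxruntime"

# Install an arbitrary package via the wildcard variation (package name is illustrative)
cmr "get generic-python-lib _package.tabulate"
```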
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--extra_index_url=value` → `CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL=value` - * `--force_install=value` → `CM_TMP_PYTHON_PACKAGE_FORCE_INSTALL=value` - * `--index_url=value` → `CM_GENERIC_PYTHON_PIP_INDEX_URL=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-generic-python-lib/run.bat) -___ -#### Script output -```bash -cmr "get generic-python-lib [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Python-automation/get-python3/index.md b/docs/scripts/Python-automation/get-python3/index.md deleted file mode 100644 index 9544d8790..000000000 --- a/docs/scripts/Python-automation/get-python3/index.md +++ /dev/null @@ -1,111 +0,0 @@ -# get-python3 -Automatically generated README for this automation recipe: **get-python3** - -Category: **[Python automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get python python3 get-python get-python3" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,python,python3,get-python,get-python3[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get python python3 get-python get-python3 [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,python,python3,get-python,get-python3' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get python python3 get-python get-python3[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_conda.#` - - ENV variables: - - CM_PYTHON_CONDA: `yes` - - CM_PYTHON_INSTALL_CACHE_TAGS: `_conda.#` - * `_custom-path.#` - - ENV variables: - - CM_PYTHON_BIN_WITH_PATH: `#` - * `_lto` - * `_optimized` - * `_shared` - * `_with-custom-ssl` - * `_with-ssl` - -
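For instance, a hypothetical detection of a custom interpreter through the `_custom-path.#` wildcard (the path replaces `#` and is only an example):

```bash
cmr "get python python3 get-python get-python3 _custom-path./usr/local/bin/python3"
```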
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/get-python3/run.bat) -___ -#### Script output -```bash -cmr "get python python3 get-python get-python3 [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Python-automation/index.md b/docs/scripts/Python-automation/index.md deleted file mode 100644 index 38a4cd7ec..000000000 --- a/docs/scripts/Python-automation/index.md +++ /dev/null @@ -1,6 +0,0 @@ -* [activate-python-venv](activate-python-venv/index.md) -* [get-generic-python-lib](get-generic-python-lib/index.md) -* [get-python3](get-python3/index.md) -* [install-generic-conda-package](install-generic-conda-package/index.md) -* [install-python-src](install-python-src/index.md) -* [install-python-venv](install-python-venv/index.md) diff --git a/docs/scripts/Python-automation/install-generic-conda-package/index.md b/docs/scripts/Python-automation/install-generic-conda-package/index.md deleted file mode 100644 index 1c663d574..000000000 --- a/docs/scripts/Python-automation/install-generic-conda-package/index.md +++ /dev/null @@ -1,113 +0,0 @@ -# install-generic-conda-package -Automatically generated README for this automation recipe: **install-generic-conda-package** - -Category: **[Python automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-generic-conda-package/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,install,generic,generic-conda-lib,conda-lib,conda-package,generic-conda-package' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get install generic generic-conda-lib conda-lib conda-package generic-conda-package[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_name.#` - * `_package.#` - - ENV variables: - - CM_CONDA_PKG_NAME: `#` - -
- - - * Group "**package-source**" -
- Click here to expand this section. - - * `_source.#` - - ENV variables: - - CM_CONDA_PKG_SRC: `#` - -
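As a sketch, installing an arbitrary package from a named channel (package and channel names are illustrative):

```bash
cmr "get install generic-conda-package _package.numpy,_source.conda-forge"
```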
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-generic-conda-package/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get install generic generic-conda-lib conda-lib conda-package generic-conda-package [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Python-automation/install-python-src/index.md b/docs/scripts/Python-automation/install-python-src/index.md deleted file mode 100644 index f43cebd9f..000000000 --- a/docs/scripts/Python-automation/install-python-src/index.md +++ /dev/null @@ -1,144 +0,0 @@ -# install-python-src -Automatically generated README for this automation recipe: **install-python-src** - -Category: **[Python automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install src python python3 src-python3 src-python" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,src,python,python3,src-python3,src-python[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install src python python3 src-python3 src-python [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,src,python,python3,src-python3,src-python' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install src python python3 src-python3 src-python[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_lto` - - ENV variables: - - CM_PYTHON_LTO_FLAG: ` --lto` - - CM_PYTHON_INSTALL_CACHE_TAGS: `with-lto` - * `_optimized` - - ENV variables: - - CM_PYTHON_OPTIMIZATION_FLAG: ` --enable-optimizations` - - CM_PYTHON_INSTALL_CACHE_TAGS: `optimized` - * `_shared` - - ENV variables: - - CM_PYTHON_INSTALL_CACHE_TAGS: `shared` - - CM_SHARED_BUILD: `yes` - * `_with-custom-ssl` - - ENV variables: - - CM_CUSTOM_SSL: `yes` - - CM_PYTHON_INSTALL_CACHE_TAGS: `with-custom-ssl` - -
- - - * Group "**ssl**" -
- Click here to expand this section. - - * `_with-ssl` - - ENV variables: - - CM_ENABLE_SSL: `yes` - - CM_PYTHON_INSTALL_CACHE_TAGS: `with-ssl` - -
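For example, a shared, SSL-enabled build of the default version listed below might be requested as follows (assuming the script accepts the generic `--version` flag):

```bash
cmr "install src python python3 src-python3 src-python _shared,_with-ssl" --version=3.10.13
```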
- -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_ENABLE_SSL: `no` - * CM_CUSTOM_SSL: `no` - * CM_SHARED_BUILD: `no` - * CM_PYTHON_OPTIMIZATION_FLAG: `` - * CM_PYTHON_LTO_FLAG: `` - * CM_WGET_URL: `https://www.python.org/ftp/python/[PYTHON_VERSION]/Python-[PYTHON_VERSION].tgz` - - -#### Versions -Default version: `3.10.13` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-src/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "install src python python3 src-python3 src-python [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Python-automation/install-python-venv/index.md b/docs/scripts/Python-automation/install-python-venv/index.md deleted file mode 100644 index f097aa196..000000000 --- a/docs/scripts/Python-automation/install-python-venv/index.md +++ /dev/null @@ -1,103 +0,0 @@ -# install-python-venv -Automatically generated README for this automation recipe: **install-python-venv** - -Category: **[Python automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "install python get-python-venv python-venv" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=install,python,get-python-venv,python-venv[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "install python get-python-venv python-venv [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'install,python,get-python-venv,python-venv' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "install python get-python-venv python-venv[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_lto` - * `_optimized` - * `_shared` - * `_with-custom-ssl` - * `_with-ssl` - -
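As a sketch, forwarding one of these variations to the underlying Python build:

```bash
cmr "install python get-python-venv python-venv _shared"
```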
- - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/install-python-venv/run.bat) -___ -#### Script output -```bash -cmr "install python get-python-venv python-venv [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/Remote-automation/index.md b/docs/scripts/Remote-automation/index.md deleted file mode 100644 index 754c07ddf..000000000 --- a/docs/scripts/Remote-automation/index.md +++ /dev/null @@ -1 +0,0 @@ -* [remote-run-commands](remote-run-commands/index.md) diff --git a/docs/scripts/Remote-automation/remote-run-commands/index.md b/docs/scripts/Remote-automation/remote-run-commands/index.md deleted file mode 100644 index afb2ccf0c..000000000 --- a/docs/scripts/Remote-automation/remote-run-commands/index.md +++ /dev/null @@ -1,117 +0,0 @@ -# remote-run-commands -Automatically generated README for this automation recipe: **remote-run-commands** - -Category: **[Remote automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'remote,run,cmds,remote-run,remote-run-cmds,ssh-run,ssh' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "remote run cmds remote-run remote-run-cmds ssh-run ssh" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--client_refresh=value` → `CM_SSH_CLIENT_REFRESH=value` - * `--host=value` → `CM_SSH_HOST=value` - * `--password=value` → `CM_SSH_PASSWORD=value` - * `--port=value` → `CM_SSH_PORT=value` - * `--run_cmds=value` → `CM_SSH_RUN_COMMANDS=value` - * `--skip_host_verify=value` → `CM_SSH_SKIP_HOST_VERIFY=value` - * `--ssh_key_file=value` → `CM_SSH_KEY_FILE=value` - * `--user=value` → `CM_SSH_USER=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_SSH_PORT: `22` - * CM_SSH_HOST: `localhost` - * CM_SSH_USER: `$USER` - * CM_SSH_CLIENT_REFRESH: `10` - * CM_SSH_KEY_FILE: `$HOME/.ssh/id_rsa` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/remote-run-commands/run.bat) -___ -#### Script output -```bash -cmr "remote run cmds remote-run remote-run-cmds ssh-run ssh " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md b/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md deleted file mode 100644 index f44318297..000000000 --- a/docs/scripts/Reproduce-MLPerf-benchmarks/app-mlperf-inference-nvidia/index.md +++ /dev/null @@ -1,652 +0,0 @@ -# app-mlperf-inference-nvidia -Automatically generated README for this automation recipe: **app-mlperf-inference-nvidia** - -Category: **[Reproduce MLPerf benchmarks](..)** - -License: **Apache 2.0** - - - ---- - -This script is a CM wrapper to the official [Nvidia submission code](https://github.com/mlcommons/inference_results_v3.0/tree/master/closed/NVIDIA) used for MLPerf inference submissions. - - - -## Download the needed files - -* Please ask privately in [this discord channel](https://discord.gg/y7hupJsUNb) if you would like to get access to an Amazon S3 bucket containing all the needed files for easiness. Otherwise, you can download them from the below links. - -For x86 machines, please download the latest install tar files from the below sites -1. [cuDNN](https://developer.nvidia.com/cudnn) (for cuda 11) -2. [TensorRT](https://developer.nvidia.com/tensorrt) -3. Imagenet validation set (unfortunately not available via public URL) following the instructions given [here](https://github.com/mlcommons/ck/blob/master/cm-mlops/script/get-dataset-imagenet-val/README-extra.md) - -
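After downloading, the home directory is assumed to contain files similar to the following (names and versions are illustrative):

```bash
ls $HOME
# cuda_11.8.0_520.61.05_linux.run
# cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz
# TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz
# imagenet-2012-val/
```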
- - - -## Using Docker (Recommended on x86 systems) - - -Assuming all the downloaded files are to the user home directory please do the following steps: - -1. Download CUDA 11.8 - ``` - wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run - ``` -2. [Install docker](https://docs.docker.com/engine/install/) and [Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) - -3. Give docker permission to the current user - ``` - sudo usermod -aG docker $USER - ``` - Logout and login - Restart docker if required and confirm that Nvidia container toolkit is working by - ``` - nvidia-ctk --version - ``` -4. Check if Nvidia driver is working properly on the host. - ``` - nvidia-smi - ``` - If the above command produces any error you'll need to install Nvidia drivers on the host. You can do this via CM if you have sudo access - ``` - cmr "install cuda prebuilt _driver" --version=11.8.0 - ``` -5. Build the docker container and mount the paths from the host machine. - ** You may want to change the `scratch_path` location as it can take 100s of GBs.** - ```bash - cm docker script --tags=build,nvidia,inference,server \ - --cuda_run_file_path=$HOME/cuda_11.8.0_520.61.05_linux.run \ - --tensorrt_tar_file_path=$HOME/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \ - --cudnn_tar_file_path=$HOME/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \ - --imagenet_path=$HOME/imagenet-2012-val \ - --scratch_path=$HOME/mlperf_scratch \ - --docker_cm_repo=mlcommons@cm4mlops \ - --results_dir=$HOME/results_dir \ - --submission_dir=$HOME/submission_dir \ - --adr.compiler.tags=gcc - ``` - * Use `--docker_cache=no` to turn off docker caching - * Use `--docker_run_cmd_prefix="cm pull repo mlcommons@cm4mlops --checkout=dev"` to update the CK repository when docker caching is used - * Use `--custom_system=no` if you are using a similar system to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems). - -6. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files - ### Example output - ``` - ============================================ - => A system ID is a string containing only letters, numbers, and underscores - => that is used as the human-readable name of the system. It is also used as - => the system name when creating the measurements/ and results/ entries. - => This string should also start with a letter to be a valid Python enum member name. - => Specify the system ID to use for the current system: phoenix - => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix - => This script will generate Benchmark Configuration stubs for the detected system. - Continue? [y/n]: y - ``` - Now you'll be inside the CM Nvidia docker container and can run further scripts. - -7. Once the build is complete, you can proceed with any further CM scripts like for MLPerf inference. You can also save the container at this stage using [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) so that it can be launched later without having to go through the previous steps. - -
- -
- 
- 
- 
-## Without Docker
- 
- 
-1. Install CUDA
- If CUDA is not detected, CM should download and install it automatically when you run the workflow.
- **Nvidia drivers are expected to be installed on the system.**
- 
-2. Install cuDNN
- ```bash
- cmr "get cudnn" --tar_file=<path to the downloaded cuDNN tar file>
- ```
-3. Install TensorRT
- ```bash
- cmr "get tensorrt _dev" --tar_file=<path to the downloaded TensorRT tar file>
- ```
- On non-x86 systems like Nvidia Orin, you can do a package-manager install instead, and CM should pick up the installation automatically during the workflow run.
- 
-4. Build the Nvidia inference server
- ```
- cmr "build nvidia inference server" \
- --adr.install-cuda-prebuilt.local_run_file_path=/data/cuda_11.8.0_520.61.05_linux.run \
- --adr.tensorrt.tar_file=/data/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz \
- --adr.cudnn.tar_file=/data/cudnn-linux-x86_64-8.9.2.26_cuda11-archive.tar.xz \
- --adr.compiler.tags=gcc \
- [--custom_system=no]
- ```
- Use `--custom_system=no` if you are using a system similar to the [Nvidia submission systems for MLPerf inference 3.0](https://github.com/mlcommons/inference_results_v3.0/tree/main/closed/NVIDIA/systems).
- 
-5. At the end of the build you'll get the following prompt unless you have chosen `--custom_system=no`. Please give a system name and say yes to generating the configuration files.
- 
- ### Example output
- ```
- ============================================
- => A system ID is a string containing only letters, numbers, and underscores
- => that is used as the human-readable name of the system. It is also used as
- => the system name when creating the measurements/ and results/ entries.
- => This string should also start with a letter to be a valid Python enum member name.
- => Specify the system ID to use for the current system: phoenix
- => Reloaded system list. MATCHED_SYSTEM: KnownSystem.phoenix
- => This script will generate Benchmark Configuration stubs for the detected system.
- Continue? [y/n]: y
- ```
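-
-After the build completes, the harness can be launched through this script's own tags. A hedged example run combining variations and input flags that are documented below (the values shown are illustrative):
-
-```bash
-cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia _tensorrt,_cuda,_resnet50,_run_harness" \
-   --scenario=Offline --mode=performance --gpu_batch_size=64
-```
-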
- 
- 
-## Acknowledgments
- 
- 
-* A common CM interface and automation for MLPerf inference benchmarks was developed by Arjun Suresh and Grigori Fursin, sponsored by the [cTuning foundation](https://cTuning.org) and [cKnowledge.org](https://cKnowledge.org).
-* Nvidia's MLPerf inference implementation was developed by Zhihan Jiang, Ethan Cheng, Yiheng Zhang and Jinho Suh.
-
-
-* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia/_cm.yaml)*
-* Output cached? *False*
-
----
-### Reuse this script in your project

-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-````cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia" --help````
-
-#### Run this script
-
-=== "CLI"
- ##### Run this script via CLI
-
- ```bash
- cm run script --tags=reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia[,variations] [--input_flags]
- ```
-=== "CLI Alt"
- ##### Run this script via CLI (alternative)
-
-
- ```bash
- cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags]
- ```
-
-=== "Python"
- ##### Run this script from Python
-
-
- ```python
-
- import cmind
-
- r = cmind.access({'action':'run',
- 'automation':'script',
- 'tags':'reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia',
- 'out':'con',
- ...
- (other input keys for this script)
- ...
- })
-
- if r['return']>0:
- print (r['error'])
-
- ```
-
-
-=== "Docker"
- ##### Run this script via Docker (beta)
-
- ```bash
- cm docker script "reproduce mlcommons mlperf inference harness nvidia-harness nvidia[variations]" [--input_flags]
- ```
-___
-
-=== "Variations"
-
-
- #### Variations
-
- * *No group (any combination of variations can be selected)*
-
- 
- * `_run-harness`
- * `_v3.1`
- - ENV variables:
- - CM_MLPERF_INFERENCE_VERSION: `v3.1`
- - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: `GPTJ-07142023.pth`
- 
-
- - - * Group "**backend**" -
- 
- * **`_tensorrt`** (default)
- - ENV variables:
- - CM_MLPERF_BACKEND: `tensorrt`
- - CM_MLPERF_BACKEND_NAME: `TensorRT`
- 
-
- - - * Group "**batch-size**" -
- 
- * `_batch_size.#`
- - ENV variables:
- - CM_MODEL_BATCH_SIZE: `#`
- - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `#`
- 
-
- - - * Group "**build-engine-options**" -
- 
- * `_build_engine_options.#`
- - ENV variables:
- - CM_MLPERF_NVIDIA_HARNESS_EXTRA_BUILD_ENGINE_OPTIONS: `#`
- 
-
- - - * Group "**device**" -
- 
- * `_cpu`
- - ENV variables:
- - CM_MLPERF_DEVICE: `cpu`
- * **`_cuda`** (default)
- - ENV variables:
- - CM_MLPERF_DEVICE: `gpu`
- - CM_MLPERF_DEVICE_LIB_NAMESPEC: `cudart`
- 
-
- - - * Group "**device-memory**" -
- 
- * `_gpu_memory.16`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `16`
- * `_gpu_memory.24`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `24`
- * `_gpu_memory.32`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `32`
- * `_gpu_memory.40`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `40`
- * `_gpu_memory.48`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `48`
- * `_gpu_memory.8`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `8`
- * `_gpu_memory.80`
- - ENV variables:
- - CM_NVIDIA_GPU_MEMORY: `80`
- 
-
- - - * Group "**dla-batch-size**" -
- 
- * `_dla_batch_size.#`
- - ENV variables:
- - CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE: `#`
- - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX2: `dla_batch_size.#`
- 
-
- - - * Group "**gpu-connection**" -
- 
- * `_pcie`
- * `_sxm`
- 
-
- - - * Group "**gpu-name**" -
- 
- * `_a100`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- * `_a6000`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- * `_custom`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- - CM_MODEL_BATCH_SIZE: ``
- - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `<<<CM_MODEL_BATCH_SIZE>>>`
- * `_l4`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- * `_orin`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- - CM_MODEL_BATCH_SIZE: ``
- - CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE: `<<<CM_MODEL_BATCH_SIZE>>>`
- * `_rtx_4090`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- * `_rtx_6000_ada`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- * `_t4`
- - ENV variables:
- - CM_NVIDIA_CUSTOM_GPU: `yes`
- 
-
- - - * Group "**loadgen-scenario**" -
- 
- * `_multistream`
- - ENV variables:
- - CM_MLPERF_LOADGEN_SCENARIO: `MultiStream`
- * `_offline`
- - ENV variables:
- - CM_MLPERF_LOADGEN_SCENARIO: `Offline`
- * `_server`
- - ENV variables:
- - CM_MLPERF_LOADGEN_SCENARIO: `Server`
- * `_singlestream`
- - ENV variables:
- - CM_MLPERF_LOADGEN_SCENARIO: `SingleStream`
- - CUDA_VISIBLE_DEVICES_NOT_USED: `0`
- 
-
- - - * Group "**model**" -
- 
- * `_3d-unet-99`
- - ENV variables:
- - CM_MODEL: `3d-unet-99`
- - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
- * `_3d-unet-99.9`
- - ENV variables:
- - CM_MODEL: `3d-unet-99.9`
- - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.onnx`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
- * `_bert-99`
- - ENV variables:
- - CM_MODEL: `bert-99`
- - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
- * `_bert-99.9`
- - ENV variables:
- - CM_MODEL: `bert-99.9`
- - CM_NOT_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3733910/files/model.onnx`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
- * `_dlrm-v2-99`
- - ENV variables:
- - CM_MODEL: `dlrm-v2-99`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
- * `_dlrm-v2-99.9`
- - ENV variables:
- - CM_MODEL: `dlrm-v2-99.9`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp32`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
- * `_gptj-99`
- - ENV variables:
- - CM_MODEL: `gptj-99`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
- * `_gptj-99.9`
- - ENV variables:
- - CM_MODEL: `gptj-99.9`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int32`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
- * **`_resnet50`** (default)
- - ENV variables:
- - CM_MODEL: `resnet50`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
- * `_retinanet`
- - ENV variables:
- - CM_MODEL: `retinanet`
- - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/6617981/files/resnext50_32x4d_fpn.pth`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `int8`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `int8`
- * `_rnnt`
- - ENV variables:
- - CM_MODEL: `rnnt`
- - CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: `https://zenodo.org/record/3662521/files/DistributedDataParallel_1576581068.9962234-epoch-100.pt`
- - CM_ML_MODEL_WEIGHT_TRANSFORMATIONS: `quantization, affine fusion`
- - CM_ML_MODEL_INPUTS_DATA_TYPE: `fp16`
- - CM_ML_MODEL_WEIGHTS_DATA_TYPE: `fp16`
- 
-
- - - * Group "**num-gpus**" -
- 
- * `_num-gpus.#`
- - ENV variables:
- - CM_NVIDIA_NUM_GPUS: `#`
- * **`_num-gpus.1`** (default)
- - ENV variables:
- - CM_NVIDIA_NUM_GPUS: `1`
- 
-
- - - * Group "**power-mode**" -
- 
- * `_maxn`
- - ENV variables:
- - CM_MLPERF_NVIDIA_HARNESS_MAXN: `True`
- * `_maxq`
- - ENV variables:
- - CM_MLPERF_NVIDIA_HARNESS_MAXQ: `True`
- 
-
- - - * Group "**run-mode**" -
- 
- * `_build`
- - ENV variables:
- - MLPERF_NVIDIA_RUN_COMMAND: `build`
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `build`
- * `_build_engine`
- - Aliases: `_build-engine`
- - ENV variables:
- - MLPERF_NVIDIA_RUN_COMMAND: `generate_engines`
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `generate_engines`
- * `_calibrate`
- - ENV variables:
- - MLPERF_NVIDIA_RUN_COMMAND: `calibrate`
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `calibrate`
- * `_download_model`
- - ENV variables:
- - MLPERF_NVIDIA_RUN_COMMAND: `download_model`
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `download_model`
- * `_prebuild`
- - ENV variables:
- - MLPERF_NVIDIA_RUN_COMMAND: `prebuild`
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `prebuild`
- * `_preprocess_data`
- - ENV variables:
- - MLPERF_NVIDIA_RUN_COMMAND: `preprocess_data`
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `preprocess_data`
- * **`_run_harness`** (default)
- - ENV variables:
- - CM_MLPERF_NVIDIA_HARNESS_RUN_MODE: `run_harness`
- - MLPERF_NVIDIA_RUN_COMMAND: `run_harness`
- - CM_CALL_MLPERF_RUNNER: `yes`
- 
-
- - - * Group "**triton**" -
- 
- * `_use_triton`
- - ENV variables:
- - CM_MLPERF_NVIDIA_HARNESS_USE_TRITON: `yes`
- - CM_MLPERF_SUT_NAME_RUN_CONFIG_SUFFIX3: `using_triton`
- 
-
- - - * Group "**version**" -
- 
- * **`_v4.0`** (default)
- - ENV variables:
- - CM_MLPERF_INFERENCE_VERSION: `v4.0`
- - CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: `GPTJ-FP8-quantized`
- 
-
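-
- Variations from different groups can be combined in one invocation, and the `.#` variations take a user-supplied value in place of `#`. An illustrative sketch (the GPU count and batch size are placeholders):
-
- ```bash
- cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia _tensorrt,_cuda,_rtx_4090,_num-gpus.2,_offline,_batch_size.32,_run_harness" [--input_flags]
- ```
-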
- - - ##### Default variations - - `_cuda,_num-gpus.1,_resnet50,_run_harness,_tensorrt,_v4.0` -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--audio_buffer_num_lines=value` → `CM_MLPERF_NVIDIA_HARNESS_AUDIO_BUFFER_NUM_LINES=value` - * `--count=value` → `CM_MLPERF_LOADGEN_QUERY_COUNT=value` - * `--deque_timeout_usec=value` → `CM_MLPERF_NVIDIA_HARNESS_DEQUE_TIMEOUT_USEC=value` - * `--devices=value` → `CM_MLPERF_NVIDIA_HARNESS_DEVICES=value` - * `--dla_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_BATCH_SIZE=value` - * `--dla_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_COPY_STREAMS=value` - * `--dla_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_DLA_INFERENCE_STREAMS=value` - * `--embedding_weights_on_gpu_part=value` → `CM_MLPERF_NVIDIA_HARNESS_EMBEDDING_WEIGHTS_ON_GPU_PART=value` - * `--enable_sort=value` → `CM_MLPERF_NVIDIA_HARNESS_ENABLE_SORT=value` - * `--end_on_device=value` → `CM_MLPERF_NVIDIA_HARNESS_END_ON_DEVICE=value` - * `--extra_run_options=value` → `CM_MLPERF_NVIDIA_HARNESS_EXTRA_RUN_OPTIONS=value` - * `--gpu_batch_size=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_BATCH_SIZE=value` - * `--gpu_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_COPY_STREAMS=value` - * `--gpu_inference_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_GPU_INFERENCE_STREAMS=value` - * `--graphs_max_seqlen=value` → `CM_MLPERF_NVIDIA_HARNESS_GRAPHS_MAX_SEQLEN=value` - * `--input_format=value` → `CM_MLPERF_NVIDIA_HARNESS_INPUT_FORMAT=value` - * `--log_dir=value` → `CM_MLPERF_NVIDIA_HARNESS_LOG_DIR=value` - * `--make_cmd=value` → `MLPERF_NVIDIA_RUN_COMMAND=value` - * `--max_batchsize=value` → `CM_MLPERF_LOADGEN_MAX_BATCHSIZE=value` - * `--max_dlas=value` → `CM_MLPERF_NVIDIA_HARNESS_MAX_DLAS=value` - * `--mlperf_conf=value` → `CM_MLPERF_CONF=value` - * `--mode=value` → `CM_MLPERF_LOADGEN_MODE=value` - * `--multistream_target_latency=value` → `CM_MLPERF_LOADGEN_MULTISTREAM_TARGET_LATENCY=value` - * `--num_issue_query_threads=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_ISSUE_QUERY_THREADS=value` - * `--num_sort_segments=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_SORT_SEGMENTS=value` - * `--num_warmups=value` → `CM_MLPERF_NVIDIA_HARNESS_NUM_WARMUPS=value` - * `--offline_target_qps=value` → `CM_MLPERF_LOADGEN_OFFLINE_TARGET_QPS=value` - * `--output_dir=value` → `CM_MLPERF_OUTPUT_DIR=value` - * `--performance_sample_count=value` → `CM_MLPERF_LOADGEN_PERFORMANCE_SAMPLE_COUNT=value` - * `--power_setting=value` → `CM_MLPERF_NVIDIA_HARNESS_POWER_SETTING=value` - * `--rerun=value` → `CM_RERUN=value` - * `--run_infer_on_copy_streams=value` → `CM_MLPERF_NVIDIA_HARNESS_RUN_INFER_ON_COPY_STREAMS=value` - * `--scenario=value` → `CM_MLPERF_LOADGEN_SCENARIO=value` - * `--server_target_qps=value` → `CM_MLPERF_LOADGEN_SERVER_TARGET_QPS=value` - * `--singlestream_target_latency=value` → `CM_MLPERF_LOADGEN_SINGLESTREAM_TARGET_LATENCY=value` - * `--skip_postprocess=value` → `CM_MLPERF_NVIDIA_HARNESS_SKIP_POSTPROCESS=value` - * `--skip_preprocess=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--skip_preprocessing=value` → `CM_SKIP_PREPROCESS_DATASET=value` - * `--soft_drop=value` → `CM_MLPERF_NVIDIA_HARNESS_SOFT_DROP=value` - * `--start_from_device=value` → `CM_MLPERF_NVIDIA_HARNESS_START_FROM_DEVICE=value` - * `--target_latency=value` → `CM_MLPERF_LOADGEN_TARGET_LATENCY=value` - * `--target_qps=value` → `CM_MLPERF_LOADGEN_TARGET_QPS=value` - * `--use_cuda_thread_per_device=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_CUDA_THREAD_PER_DEVICE=value` - * `--use_deque_limit=value` → 
`CM_MLPERF_NVIDIA_HARNESS_USE_DEQUE_LIMIT=value` - * `--use_fp8=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_FP8=value` - * `--use_graphs=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_GRAPHS=value` - * `--use_small_tile_gemm_plugin=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_SMALL_TILE_GEMM_PLUGIN=value` - * `--use_triton=value` → `CM_MLPERF_NVIDIA_HARNESS_USE_TRITON=value` - * `--user_conf=value` → `CM_MLPERF_USER_CONF=value` - * `--workspace_size=value` → `CM_MLPERF_NVIDIA_HARNESS_WORKSPACE_SIZE=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_BATCH_COUNT: `1` - * CM_BATCH_SIZE: `1` - * CM_FAST_COMPILATION: `yes` - * CM_MLPERF_LOADGEN_SCENARIO: `Offline` - * CM_MLPERF_LOADGEN_MODE: `performance` - * CM_SKIP_PREPROCESS_DATASET: `no` - * CM_SKIP_MODEL_DOWNLOAD: `no` - * CM_MLPERF_SUT_NAME_IMPLEMENTATION_PREFIX: `nvidia_original` - * CM_MLPERF_SKIP_RUN: `no` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/app-mlperf-inference-nvidia/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce mlcommons mlperf inference harness nvidia-harness nvidia [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproduce-MLPerf-benchmarks/index.md b/docs/scripts/Reproduce-MLPerf-benchmarks/index.md deleted file mode 100644 index 6db8a9a3e..000000000 --- a/docs/scripts/Reproduce-MLPerf-benchmarks/index.md +++ /dev/null @@ -1,4 +0,0 @@ -* [app-mlperf-inference-nvidia](app-mlperf-inference-nvidia/index.md) -* [reproduce-mlperf-octoml-tinyml-results](reproduce-mlperf-octoml-tinyml-results/index.md) -* [reproduce-mlperf-training-nvidia](reproduce-mlperf-training-nvidia/index.md) -* [wrapper-reproduce-octoml-tinyml-submission](wrapper-reproduce-octoml-tinyml-submission/index.md) diff --git a/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md b/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md deleted file mode 100644 index d32b17538..000000000 --- a/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-octoml-tinyml-results/index.md +++ /dev/null @@ -1,137 +0,0 @@ -# reproduce-mlperf-octoml-tinyml-results -Automatically generated README for this automation recipe: **reproduce-mlperf-octoml-tinyml-results** - -Category: **[Reproduce MLPerf benchmarks](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/_cm.json)* -* Output cached? 
*True*
-
----
-### Reuse this script in your project
-
-#### Install MLCommons CM automation meta-framework
-
-* [Install CM](https://docs.mlcommons.org/ck/install)
-* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/)
-
-#### Pull CM repository with this automation recipe (CM script)
-
-```cm pull repo mlcommons@cm4mlops```
-
-#### Print CM help from the command line
-
-````cmr "reproduce tiny results mlperf octoml mlcommons" --help````
-
-#### Run this script
-
-=== "CLI"
- ##### Run this script via CLI
-
- ```bash
- cm run script --tags=reproduce,tiny,results,mlperf,octoml,mlcommons[,variations] [--input_flags]
- ```
-=== "CLI Alt"
- ##### Run this script via CLI (alternative)
-
-
- ```bash
- cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags]
- ```
-
-=== "Python"
- ##### Run this script from Python
-
-
- ```python
-
- import cmind
-
- r = cmind.access({'action':'run',
- 'automation':'script',
- 'tags':'reproduce,tiny,results,mlperf,octoml,mlcommons',
- 'out':'con',
- ...
- (other input keys for this script)
- ...
- })
-
- if r['return']>0:
- print (r['error'])
-
- ```
-
-
-=== "Docker"
- ##### Run this script via Docker (beta)
-
- ```bash
- cm docker script "reproduce tiny results mlperf octoml mlcommons[variations]" [--input_flags]
- ```
-___
-
-=== "Variations"
-
-
- #### Variations
-
- * *No group (any combination of variations can be selected)*
-
- 
- * `_NRF`
- - ENV variables:
- - CM_TINY_BOARD: `NRF5340DK`
- * `_NUCLEO`
- - ENV variables:
- - CM_TINY_BOARD: `NUCLEO_L4R5ZI`
- * `_ad`
- - ENV variables:
- - CM_TINY_MODEL: `ad`
- * `_cmsis_nn`
- - ENV variables:
- - CM_MICROTVM_VARIANT: `microtvm_cmsis_nn`
- * `_ic`
- - ENV variables:
- - CM_TINY_MODEL: `ic`
- * `_kws`
- - ENV variables:
- - CM_TINY_MODEL: `kws`
- * `_native`
- - ENV variables:
- - CM_MICROTVM_VARIANT: `microtvm_native`
- * `_vww`
- - ENV variables:
- - CM_TINY_MODEL: `vww`
- 
-
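-
- For example, a hedged sketch targeting the image-classification model on the NUCLEO board with the CMSIS-NN variant (the flash flag is documented below):
-
- ```bash
- cmr "reproduce tiny results mlperf octoml mlcommons _NUCLEO,_ic,_cmsis_nn" --flash=yes
- ```
-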
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--flash=value` → `CM_FLASH_BOARD=value` - * `--recreate_binary=value` → `CM_RECREATE_BINARY=value` - - - -#### Versions -Default version: `r1.0` - -* `r1.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-octoml-tinyml-results/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce tiny results mlperf octoml mlcommons [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md b/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md deleted file mode 100644 index 8b461ba10..000000000 --- a/docs/scripts/Reproduce-MLPerf-benchmarks/reproduce-mlperf-training-nvidia/index.md +++ /dev/null @@ -1,115 +0,0 @@ -# reproduce-mlperf-training-nvidia -Automatically generated README for this automation recipe: **reproduce-mlperf-training-nvidia** - -Category: **[Reproduce MLPerf benchmarks](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "reproduce mlcommons mlperf train training nvidia-training nvidia" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'reproduce,mlcommons,mlperf,train,training,nvidia-training,nvidia' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "reproduce mlcommons mlperf train training nvidia-training nvidia[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**benchmark**" -
- 
- * `_resnet`
- - ENV variables:
- - CM_MLPERF_TRAINING_BENCHMARK: `resnet`
- 
-
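-
- For example, a sketch of a ResNet training run (the results directory is a placeholder, and the system configuration name must match one provided by the Nvidia training code):
-
- ```bash
- cmr "reproduce mlcommons mlperf train training nvidia-training nvidia _resnet" --results_dir=$HOME/train_results --system_conf_name=<your_system_conf>
- ```
-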
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--results_dir=value` → `CM_MLPERF_RESULTS_DIR=value` - * `--system_conf_name=value` → `CM_MLPERF_NVIDIA_TRAINING_SYSTEM_CONF_NAME=value` - - - -#### Versions -* `r2.1` -* `r3.0` - -#### Native script being run -=== "Linux/macOS" - * [run-resnet.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/run-resnet.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-mlperf-training-nvidia/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce mlcommons mlperf train training nvidia-training nvidia [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md b/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md deleted file mode 100644 index 2a69d5c1c..000000000 --- a/docs/scripts/Reproduce-MLPerf-benchmarks/wrapper-reproduce-octoml-tinyml-submission/index.md +++ /dev/null @@ -1,101 +0,0 @@ -# wrapper-reproduce-octoml-tinyml-submission -Automatically generated README for this automation recipe: **wrapper-reproduce-octoml-tinyml-submission** - -Category: **[Reproduce MLPerf benchmarks](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,generate-tiny,generate,submission,tiny,generate-tiny-submission,results,mlcommons,mlperf,octoml' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--flash=value` → `CM_FLASH_BOARD=value` - * `--recreate_binary=value` → `CM_RECREATE_BINARY=value` - - - -#### Versions -Default version: `r1.0` - -* `r1.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/wrapper-reproduce-octoml-tinyml-submission/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "run generate-tiny generate submission tiny generate-tiny-submission results mlcommons mlperf octoml " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md b/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md deleted file mode 100644 index ba254ab2b..000000000 --- a/docs/scripts/Reproducibility-and-artifact-evaluation/get-ipol-src/index.md +++ /dev/null @@ -1,98 +0,0 @@ -# get-ipol-src -Automatically generated README for this automation recipe: **get-ipol-src** - -Category: **[Reproducibility and artifact evaluation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-ipol-src/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get ipol journal src ipol-src" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,ipol,journal,src,ipol-src [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get ipol journal src ipol-src " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,ipol,journal,src,ipol-src' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get ipol journal src ipol-src" [--input_flags] - ``` -___ - -=== "Input Flags" - - - #### Input Flags - - * --**number:** IPOL publication number - * --**year:** IPOL publication year -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--number=value` → `CM_IPOL_NUMBER=value` - * `--year=value` → `CM_IPOL_YEAR=value` - - - - -___ -#### Script output -```bash -cmr "get ipol journal src ipol-src " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproducibility-and-artifact-evaluation/index.md b/docs/scripts/Reproducibility-and-artifact-evaluation/index.md deleted file mode 100644 index 6803c39f9..000000000 --- a/docs/scripts/Reproducibility-and-artifact-evaluation/index.md +++ /dev/null @@ -1,4 +0,0 @@ -* [get-ipol-src](get-ipol-src/index.md) -* [process-ae-users](process-ae-users/index.md) -* [reproduce-ipol-paper-2022-439](reproduce-ipol-paper-2022-439/index.md) -* [reproduce-micro-paper-2023-victima](reproduce-micro-paper-2023-victima/index.md) diff --git a/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md b/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md deleted file mode 100644 index 51e9a4f91..000000000 --- a/docs/scripts/Reproducibility-and-artifact-evaluation/process-ae-users/index.md +++ /dev/null @@ -1,95 +0,0 @@ -# process-ae-users -Automatically generated README for this automation recipe: **process-ae-users** - -Category: **[Reproducibility and artifact evaluation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "process ae users" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=process,ae,users [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "process ae users " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'process,ae,users' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "process ae users" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--file=value` → `CM_PROCESS_AE_USERS_INPUT_FILE=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/process-ae-users/run.bat) -___ -#### Script output -```bash -cmr "process ae users " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md b/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md deleted file mode 100644 index d0298a09c..000000000 --- a/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-ipol-paper-2022-439/index.md +++ /dev/null @@ -1,97 +0,0 @@ -# reproduce-ipol-paper-2022-439 -Automatically generated README for this automation recipe: **reproduce-ipol-paper-2022-439** - -Category: **[Reproducibility and artifact evaluation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439 [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'app,python,reproduce,project,paper,ipol,journal,repro,reproducibility,pytorch,2022-439' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--image1=value` → `CM_IMAGE_1=value` - * `--image2=value` → `CM_IMAGE_2=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-ipol-paper-2022-439/run.bat) -___ -#### Script output -```bash -cmr "app python reproduce project paper ipol journal repro reproducibility pytorch 2022-439 " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md b/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md deleted file mode 100644 index 461210341..000000000 --- a/docs/scripts/Reproducibility-and-artifact-evaluation/reproduce-micro-paper-2023-victima/index.md +++ /dev/null @@ -1,123 +0,0 @@ -# reproduce-micro-paper-2023-victima -Automatically generated README for this automation recipe: **reproduce-micro-paper-2023-victima** - -Category: **[Reproducibility and artifact evaluation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "reproduce project paper micro micro-2023 victima" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=reproduce,project,paper,micro,micro-2023,victima[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "reproduce project paper micro micro-2023 victima [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'reproduce,project,paper,micro,micro-2023,victima' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "reproduce project paper micro micro-2023 victima[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- 
- * `_install_deps`
- * `_plot`
- * `_run`
- 
-
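-
- These variations mirror the artifact's stages and can be run in sequence. A sketch using the documented default flag values:
-
- ```bash
- cmr "reproduce project paper micro micro-2023 victima _install_deps"
- cmr "reproduce project paper micro micro-2023 victima _run" --container=docker --job_manager=native
- cmr "reproduce project paper micro micro-2023 victima _plot"
- ```
-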
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--container=value` → `CM_VICTIMA_CONTAINER=value` - * `--job_manager=value` → `CM_VICTIMA_JOB_MANAGER=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_VICTIMA_JOB_MANAGER: `native` - * CM_VICTIMA_CONTAINER: `docker` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/reproduce-micro-paper-2023-victima/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "reproduce project paper micro micro-2023 victima [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/index.md b/docs/scripts/Tests/index.md deleted file mode 100644 index c7d48602b..000000000 --- a/docs/scripts/Tests/index.md +++ /dev/null @@ -1,15 +0,0 @@ -* [print-any-text](print-any-text/index.md) -* [print-croissant-desc](print-croissant-desc/index.md) -* [print-hello-world](print-hello-world/index.md) -* [print-hello-world-java](print-hello-world-java/index.md) -* [print-hello-world-javac](print-hello-world-javac/index.md) -* [print-hello-world-py](print-hello-world-py/index.md) -* [print-python-version](print-python-version/index.md) -* [run-python](run-python/index.md) -* [test-cm-core](test-cm-core/index.md) -* [test-cm-script-pipeline](test-cm-script-pipeline/index.md) -* [test-deps-conditions](test-deps-conditions/index.md) -* [test-deps-conditions2](test-deps-conditions2/index.md) -* [test-download-and-extract-artifacts](test-download-and-extract-artifacts/index.md) -* [test-set-sys-user-cm](test-set-sys-user-cm/index.md) -* [upgrade-python-pip](upgrade-python-pip/index.md) diff --git a/docs/scripts/Tests/print-any-text/index.md b/docs/scripts/Tests/print-any-text/index.md deleted file mode 100644 index 3a924de55..000000000 --- a/docs/scripts/Tests/print-any-text/index.md +++ /dev/null @@ -1,129 +0,0 @@ -# print-any-text -Automatically generated README for this automation recipe: **print-any-text** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -Developers: Grigori Fursin - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print any-text" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,any-text[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print any-text [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,any-text' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print any-text[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- 
- * `_cm_env.#`
- - ENV variables:
- - CM_PRINT_ANY_CM_ENV_KEYS: `#`
- * `_os_env.#`
- - ENV variables:
- - CM_PRINT_ANY_OS_ENV_KEYS: `#`
- * `_text.#`
- - ENV variables:
- - CM_PRINT_ANY_TEXT: `#`
- 
-
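-
- The `#` in these variations is a free-form suffix that is copied into the corresponding environment variable. Two hedged, equivalent ways to set the text (the values are illustrative):
-
- ```bash
- cmr "print any-text _text.hello"
- cmr "print any-text" --text=hello --os_env_keys=HOME
- ```
-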
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--cm_env_keys=value` → `CM_PRINT_ANY_CM_ENV_KEYS=value` - * `--os_env_keys=value` → `CM_PRINT_ANY_OS_ENV_KEYS=value` - * `--text=value` → `CM_PRINT_ANY_TEXT=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_PRINT_ANY_TEXT: `` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-any-text/run.bat) -___ -#### Script output -```bash -cmr "print any-text [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/print-croissant-desc/index.md b/docs/scripts/Tests/print-croissant-desc/index.md deleted file mode 100644 index 2533d905d..000000000 --- a/docs/scripts/Tests/print-croissant-desc/index.md +++ /dev/null @@ -1,106 +0,0 @@ -# print-croissant-desc -Automatically generated README for this automation recipe: **print-croissant-desc** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print croissant desc" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,croissant,desc [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print croissant desc " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,croissant,desc' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print croissant desc" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--url=value` → `CM_PRINT_CROISSANT_URL=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_PRINT_CROISSANT_URL: `https://raw.githubusercontent.com/mlcommons/croissant/main/datasets/1.0/gpt-3/metadata.json` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-croissant-desc/run.bat) -___ -#### Script output -```bash -cmr "print croissant desc " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/print-hello-world-java/index.md b/docs/scripts/Tests/print-hello-world-java/index.md deleted file mode 100644 index 56a73326b..000000000 --- a/docs/scripts/Tests/print-hello-world-java/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# print-hello-world-java -Automatically generated README for this automation recipe: **print-hello-world-java** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print hello world hello-world hello world java" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,hello world,hello-world,hello,world,java - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print hello world hello-world hello world java " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,hello world,hello-world,hello,world,java' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print hello world hello-world hello world java" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-java/run.bat) -___ -#### Script output -```bash -cmr "print hello world hello-world hello world java " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/print-hello-world-javac/index.md b/docs/scripts/Tests/print-hello-world-javac/index.md deleted file mode 100644 index 0166b29cd..000000000 --- a/docs/scripts/Tests/print-hello-world-javac/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# print-hello-world-javac -Automatically generated README for this automation recipe: **print-hello-world-javac** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print hello world hello-world hello world javac" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,hello world,hello-world,hello,world,javac - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print hello world hello-world hello world javac " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,hello world,hello-world,hello,world,javac' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print hello world hello-world hello world javac" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-javac/run.bat) -___ -#### Script output -```bash -cmr "print hello world hello-world hello world javac " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/print-hello-world-py/index.md b/docs/scripts/Tests/print-hello-world-py/index.md deleted file mode 100644 index e753b2fd8..000000000 --- a/docs/scripts/Tests/print-hello-world-py/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# print-hello-world-py -Automatically generated README for this automation recipe: **print-hello-world-py** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print hello world hello-world hello world python" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,hello world,hello-world,hello,world,python - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print hello world hello-world hello world python " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,hello world,hello-world,hello,world,python' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print hello world hello-world hello world python" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world-py/run.bat) -___ -#### Script output -```bash -cmr "print hello world hello-world hello world python " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/print-hello-world/index.md b/docs/scripts/Tests/print-hello-world/index.md deleted file mode 100644 index d0bba05ba..000000000 --- a/docs/scripts/Tests/print-hello-world/index.md +++ /dev/null @@ -1,123 +0,0 @@ -# print-hello-world -Automatically generated README for this automation recipe: **print-hello-world** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print hello-world hello world hello world native-script native script" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,hello-world,hello world,hello,world,native-script,native,script[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print hello-world hello world hello world native-script native script [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,hello-world,hello world,hello,world,native-script,native,script' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print hello-world hello world hello world native-script native script[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- 
- * `_skip_print_env`
- - ENV variables:
- - CM_PRINT_HELLO_WORLD_SKIP_PRINT_ENV: `yes`
- * `_text.#`
- - ENV variables:
- - CM_PRINT_HELLO_WORLD_TEXT: `#`
- 
-
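-
- A hedged sketch combining a variation suffix with the documented `--test1` flag (the values are illustrative):
-
- ```bash
- cmr "print hello-world _text.hi,_skip_print_env" --test1=TEST2
- ```
-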
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--test1=value` → `CM_ENV_TEST1=value` - - - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. - - * CM_ENV_TEST1: `TEST1` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-hello-world/run.bat) -___ -#### Script output -```bash -cmr "print hello-world hello world hello world native-script native script [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/print-python-version/index.md b/docs/scripts/Tests/print-python-version/index.md deleted file mode 100644 index 6fd14d421..000000000 --- a/docs/scripts/Tests/print-python-version/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# print-python-version -Automatically generated README for this automation recipe: **print-python-version** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "print python version python-version" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=print,python,version,python-version - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "print python version python-version " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'print,python,version,python-version' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "print python version python-version" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/print-python-version/run.bat) -___ -#### Script output -```bash -cmr "print python version python-version " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/run-python/index.md b/docs/scripts/Tests/run-python/index.md deleted file mode 100644 index be4b0fa0f..000000000 --- a/docs/scripts/Tests/run-python/index.md +++ /dev/null @@ -1,95 +0,0 @@ -# run-python -Automatically generated README for this automation recipe: **run-python** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "run python" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=run,python [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "run python " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'run,python' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "run python" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--command=value` → `CM_RUN_PYTHON_CMD=value` - - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/run-python/run.bat) -___ -#### Script output -```bash -cmr "run python " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/test-cm-core/index.md b/docs/scripts/Tests/test-cm-core/index.md deleted file mode 100644 index d7ad5f859..000000000 --- a/docs/scripts/Tests/test-cm-core/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# test-cm-core -Automatically generated README for this automation recipe: **test-cm-core** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test cm core" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,cm,core - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test cm core " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,cm,core' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test cm core" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-core/run.bat) -___ -#### Script output -```bash -cmr "test cm core " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/test-cm-script-pipeline/index.md b/docs/scripts/Tests/test-cm-script-pipeline/index.md deleted file mode 100644 index 4ab088034..000000000 --- a/docs/scripts/Tests/test-cm-script-pipeline/index.md +++ /dev/null @@ -1,90 +0,0 @@ -# test-cm-script-pipeline -Automatically generated README for this automation recipe: **test-cm-script-pipeline** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -Developers: Grigori Fursin -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test cm-script pipeline" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,cm-script,pipeline - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test cm-script pipeline " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,cm-script,pipeline' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test cm-script pipeline" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run.sh) - * [run2.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run2.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run.bat) - * [run2.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-cm-script-pipeline/run2.bat) -___ -#### Script output -```bash -cmr "test cm-script pipeline " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/test-deps-conditions/index.md b/docs/scripts/Tests/test-deps-conditions/index.md deleted file mode 100644 index 976ddd6b6..000000000 --- a/docs/scripts/Tests/test-deps-conditions/index.md +++ /dev/null @@ -1,93 +0,0 @@ -# test-deps-conditions -Automatically generated README for this automation recipe: **test-deps-conditions** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -Developers: Grigori Fursin -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test deps conditions" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,deps,conditions [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test deps conditions " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,deps,conditions' - 'out':'con', - ... - (other input keys for this script) - ... 
- }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test deps conditions" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--test1=value` → `CM_ENV1=value` - * `--test2=value` → `CM_ENV2=value` - * `--test3=value` → `CM_ENV3=value` - - - - -___ -#### Script output -```bash -cmr "test deps conditions " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/test-deps-conditions2/index.md b/docs/scripts/Tests/test-deps-conditions2/index.md deleted file mode 100644 index 94ed26f62..000000000 --- a/docs/scripts/Tests/test-deps-conditions2/index.md +++ /dev/null @@ -1,91 +0,0 @@ -# test-deps-conditions2 -Automatically generated README for this automation recipe: **test-deps-conditions2** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -Developers: Grigori Fursin -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions2/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-deps-conditions2/_cm.yaml)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test deps conditions2" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,deps,conditions2 [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test deps conditions2 " [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,deps,conditions2' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test deps conditions2" [--input_flags] - ``` -___ - -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--test=value` → `TEST=value` - - - - -___ -#### Script output -```bash -cmr "test deps conditions2 " [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/test-download-and-extract-artifacts/index.md b/docs/scripts/Tests/test-download-and-extract-artifacts/index.md deleted file mode 100644 index 8e2e0d0d1..000000000 --- a/docs/scripts/Tests/test-download-and-extract-artifacts/index.md +++ /dev/null @@ -1,87 +0,0 @@ -# test-download-and-extract-artifacts -Automatically generated README for this automation recipe: **test-download-and-extract-artifacts** - -Category: **[Tests](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/README-extra.md) - -* CM meta description for this script: *[_cm.yaml](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/_cm.yaml)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "test download-and-extract-artifacts" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=test,download-and-extract-artifacts - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "test download-and-extract-artifacts " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'test,download-and-extract-artifacts' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "test download-and-extract-artifacts" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/test-download-and-extract-artifacts/run.bat) -___ -#### Script output -```bash -cmr "test download-and-extract-artifacts " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/test-set-sys-user-cm/index.md b/docs/scripts/Tests/test-set-sys-user-cm/index.md deleted file mode 100644 index f4448d63b..000000000 --- a/docs/scripts/Tests/test-set-sys-user-cm/index.md +++ /dev/null @@ -1,96 +0,0 @@ -# test-set-sys-user-cm -Automatically generated README for this automation recipe: **test-set-sys-user-cm** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/test-set-sys-user-cm/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "demo set sys-user cm sys-user-cm" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=demo,set,sys-user,cm,sys-user-cm - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "demo set sys-user cm sys-user-cm " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'demo,set,sys-user,cm,sys-user-cm' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "demo set sys-user cm sys-user-cm" - ``` -___ - -=== "Default environment" - - #### Default environment - - - These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags. 
- - * CM_SUDO: `sudo` - - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/test-set-sys-user-cm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "demo set sys-user cm sys-user-cm " -j -``` \ No newline at end of file diff --git a/docs/scripts/Tests/upgrade-python-pip/index.md b/docs/scripts/Tests/upgrade-python-pip/index.md deleted file mode 100644 index 3e593c727..000000000 --- a/docs/scripts/Tests/upgrade-python-pip/index.md +++ /dev/null @@ -1,86 +0,0 @@ -# upgrade-python-pip -Automatically generated README for this automation recipe: **upgrade-python-pip** - -Category: **[Tests](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "upgrade python pip python-pip" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=upgrade,python,pip,python-pip - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "upgrade python pip python-pip " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'upgrade,python,pip,python-pip' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "upgrade python pip python-pip" - ``` -___ - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/run.sh) -=== "Windows" - - * [run.bat](https://github.com/mlcommons/cm4mlops/tree/main/script/upgrade-python-pip/run.bat) -___ -#### Script output -```bash -cmr "upgrade python pip python-pip " -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md b/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md deleted file mode 100644 index ae17fabc4..000000000 --- a/docs/scripts/TinyML-automation/create-fpgaconvnet-app-tinyml/index.md +++ /dev/null @@ -1,114 +0,0 @@ -# create-fpgaconvnet-app-tinyml -Automatically generated README for this automation recipe: **create-fpgaconvnet-app-tinyml** - -Category: **[TinyML automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-app-tinyml/_cm.json)* -* Output cached? 
*False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "create app fpgaconvnet" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=create,app,fpgaconvnet[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "create app fpgaconvnet [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'create,app,fpgaconvnet' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "create app fpgaconvnet[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**benchmark**" -
- Click here to expand this section. - - * **`_ic`** (default) - -
- - - * Group "**board**" -
- Click here to expand this section. - - * **`_zc706`** (default) - - ENV variables: - - CM_TINY_BOARD: `zc706` - -
- - - ##### Default variations - - `_ic,_zc706` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-app-tinyml/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "create app fpgaconvnet [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md b/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md deleted file mode 100644 index e1e0bab29..000000000 --- a/docs/scripts/TinyML-automation/create-fpgaconvnet-config-tinyml/index.md +++ /dev/null @@ -1,114 +0,0 @@ -# create-fpgaconvnet-config-tinyml -Automatically generated README for this automation recipe: **create-fpgaconvnet-config-tinyml** - -Category: **[TinyML automation](..)** - -License: **Apache 2.0** - - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-config-tinyml/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "create config fpgaconvnet" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=create,config,fpgaconvnet[,variations] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "create config fpgaconvnet [variations]" - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'create,config,fpgaconvnet' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "create config fpgaconvnet[variations]" - ``` -___ - -=== "Variations" - - - #### Variations - - * Group "**benchmark**" -
- Click here to expand this section. - - * **`_ic`** (default) - -
- - - * Group "**board**" -
- Click here to expand this section. - - * **`_zc706`** (default) - - ENV variables: - - CM_TINY_BOARD: `zc706` - -
- - - ##### Default variations - - `_ic,_zc706` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/create-fpgaconvnet-config-tinyml/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "create config fpgaconvnet [variations]" -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md b/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md deleted file mode 100644 index df6a2e298..000000000 --- a/docs/scripts/TinyML-automation/flash-tinyml-binary/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# flash-tinyml-binary -Automatically generated README for this automation recipe: **flash-tinyml-binary** - -Category: **[TinyML automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/_cm.json)* -* Output cached? *False* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "flash tiny mlperf mlcommons" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=flash,tiny,mlperf,mlcommons[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "flash tiny mlperf mlcommons [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'flash,tiny,mlperf,mlcommons' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "flash tiny mlperf mlcommons[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_NRF` - * `_NUCLEO` - * `_ad` - * `_cmsis_nn` - * `_ic` - * `_kws` - * `_native` - * `_vww` - -
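Since these variations are ungrouped, any combination can be selected in a single run. A hedged sketch combining a board target with a benchmark (the choices and the build path are illustrative; `--build_dir` is mapped to `CM_TINY_BUILD_DIR` in the flag table below):

```bash
# Flash the visual-wake-words (_vww) benchmark binary to a NUCLEO board,
# pointing the script at an existing build directory.
cm run script --tags=flash,tiny,mlperf,mlcommons,_NUCLEO,_vww --build_dir=/path/to/build
```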
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--build_dir=value` → `CM_TINY_BUILD_DIR=value` - - - -#### Versions -Default version: `r1.0` - - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/flash-tinyml-binary/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "flash tiny mlperf mlcommons [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/get-microtvm/index.md b/docs/scripts/TinyML-automation/get-microtvm/index.md deleted file mode 100644 index b73c5eb0a..000000000 --- a/docs/scripts/TinyML-automation/get-microtvm/index.md +++ /dev/null @@ -1,119 +0,0 @@ -# get-microtvm -Automatically generated README for this automation recipe: **get-microtvm** - -Category: **[TinyML automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get src source microtvm tiny" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,src,source,microtvm,tiny[,variations] [--input_flags] - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get src source microtvm tiny [variations]" [--input_flags] - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,src,source,microtvm,tiny' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get src source microtvm tiny[variations]" [--input_flags] - ``` -___ - -=== "Variations" - - - #### Variations - - * *No group (any combination of variations can be selected)* -
- Click here to expand this section. - - * `_full-history` - - ENV variables: - - CM_GIT_DEPTH: `--depth 10` - * `_short-history` - - ENV variables: - - CM_GIT_DEPTH: `--depth 10` - -
- -=== "Input Flag Mapping" - - - #### Script flags mapped to environment - - * `--ssh=value` → `CM_GIT_SSH=value` - - - -#### Versions -Default version: `main` - -* `custom` -* `main` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-microtvm/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get src source microtvm tiny [variations]" [--input_flags] -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md b/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md deleted file mode 100644 index e32311f97..000000000 --- a/docs/scripts/TinyML-automation/get-zephyr-sdk/index.md +++ /dev/null @@ -1,93 +0,0 @@ -# get-zephyr-sdk -Automatically generated README for this automation recipe: **get-zephyr-sdk** - -Category: **[TinyML automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/_cm.json)* -* Output cached? *True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get zephyr-sdk" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,zephyr-sdk - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get zephyr-sdk " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,zephyr-sdk' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get zephyr-sdk" - ``` -___ - -#### Versions -Default version: `0.13.2` - -* `0.13.1` -* `0.13.2` -* `0.15.0` - -#### Native script being run -=== "Linux/macOS" - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr-sdk/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get zephyr-sdk " -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/get-zephyr/index.md b/docs/scripts/TinyML-automation/get-zephyr/index.md deleted file mode 100644 index 6016b7ecf..000000000 --- a/docs/scripts/TinyML-automation/get-zephyr/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# get-zephyr -Automatically generated README for this automation recipe: **get-zephyr** - -Category: **[TinyML automation](..)** - -License: **Apache 2.0** - -* Notes from the authors, contributors and users: [*README-extra*](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/README-extra.md) - -* CM meta description for this script: *[_cm.json](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/_cm.json)* -* Output cached? 
*True* - ---- -### Reuse this script in your project - -#### Install MLCommons CM automation meta-framework - -* [Install CM](https://docs.mlcommons.org/ck/install) -* [CM Getting Started Guide](https://docs.mlcommons.org/ck/getting-started/) - -#### Pull CM repository with this automation recipe (CM script) - -```cm pull repo mlcommons@cm4mlops``` - -#### Print CM help from the command line - -````cmr "get zephyr" --help```` - -#### Run this script - -=== "CLI" - ##### Run this script via CLI - - ```bash - cm run script --tags=get,zephyr - ``` -=== "CLI Alt" - ##### Run this script via CLI (alternative) - - - ```bash - cmr "get zephyr " - ``` - -=== "Python" - ##### Run this script from Python - - - ```python - - import cmind - - r = cmind.access({'action':'run' - 'automation':'script', - 'tags':'get,zephyr' - 'out':'con', - ... - (other input keys for this script) - ... - }) - - if r['return']>0: - print (r['error']) - - ``` - - -=== "Docker" - ##### Run this script via Docker (beta) - - ```bash - cm docker script "get zephyr" - ``` -___ - -#### Versions -Default version: `v2.7` - -* `v2.7` - -#### Native script being run -=== "Linux/macOS" - * [run-ubuntu.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/run-ubuntu.sh) - * [run.sh](https://github.com/mlcommons/cm4mlops/tree/main/script/get-zephyr/run.sh) -=== "Windows" - - No run file exists for Windows -___ -#### Script output -```bash -cmr "get zephyr " -j -``` \ No newline at end of file diff --git a/docs/scripts/TinyML-automation/index.md b/docs/scripts/TinyML-automation/index.md deleted file mode 100644 index 1ac94a64a..000000000 --- a/docs/scripts/TinyML-automation/index.md +++ /dev/null @@ -1,6 +0,0 @@ -* [create-fpgaconvnet-app-tinyml](create-fpgaconvnet-app-tinyml/index.md) -* [create-fpgaconvnet-config-tinyml](create-fpgaconvnet-config-tinyml/index.md) -* [flash-tinyml-binary](flash-tinyml-binary/index.md) -* [get-microtvm](get-microtvm/index.md) -* [get-zephyr](get-zephyr/index.md) -* [get-zephyr-sdk](get-zephyr-sdk/index.md) diff --git a/docs/scripts/index.md b/docs/scripts/index.md deleted file mode 100644 index cc29ffc3e..000000000 --- a/docs/scripts/index.md +++ /dev/null @@ -1,30 +0,0 @@ -* [AI-ML-datasets](AI-ML-datasets) -* [AI-ML-frameworks](AI-ML-frameworks) -* [AI-ML-models](AI-ML-models) -* [AI-ML-optimization](AI-ML-optimization) -* [Cloud-automation](Cloud-automation) -* [CM-automation](CM-automation) -* [CM-Interface](CM-Interface) -* [CM-interface-prototyping](CM-interface-prototyping) -* [Collective-benchmarking](Collective-benchmarking) -* [Compiler-automation](Compiler-automation) -* [CUDA-automation](CUDA-automation) -* [Dashboard-automation](Dashboard-automation) -* [Detection-or-installation-of-tools-and-artifacts](Detection-or-installation-of-tools-and-artifacts) -* [DevOps-automation](DevOps-automation) -* [Docker-automation](Docker-automation) -* [GUI](GUI) -* [Legacy-CK-support](Legacy-CK-support) -* [MLPerf-benchmark-support](MLPerf-benchmark-support) -* [Modular-AI-ML-application-pipeline](Modular-AI-ML-application-pipeline) -* [Modular-application-pipeline](Modular-application-pipeline) -* [Modular-MLPerf-benchmarks](Modular-MLPerf-benchmarks) -* [Modular-MLPerf-inference-benchmark-pipeline](Modular-MLPerf-inference-benchmark-pipeline) -* [Modular-MLPerf-training-benchmark-pipeline](Modular-MLPerf-training-benchmark-pipeline) -* [Platform-information](Platform-information) -* [Python-automation](Python-automation) -* [Remote-automation](Remote-automation) -* 
[Reproduce-MLPerf-benchmarks](Reproduce-MLPerf-benchmarks) -* [Reproducibility-and-artifact-evaluation](Reproducibility-and-artifact-evaluation) -* [Tests](Tests) -* [TinyML-automation](TinyML-automation) diff --git a/mkdocs.yml b/mkdocs.yml index 87bcc7a5a..ebd48a20a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -21,38 +21,6 @@ theme: nav: - HOME: index.md - Getting Started: getting-started.md - - CM Scripts: - - scripts/index.md - - Python-automation: scripts/Python-automation/index.md - - MLPerf-benchmark-support: scripts/MLPerf-benchmark-support/index.md - - Modular-AI-ML-application-pipeline: scripts/Modular-AI-ML-application-pipeline/index.md - - Modular-application-pipeline: scripts/Modular-application-pipeline/index.md - - Modular-MLPerf-inference-benchmark-pipeline: scripts/Modular-MLPerf-inference-benchmark-pipeline/index.md - - Modular-MLPerf-benchmarks: scripts/Modular-MLPerf-benchmarks/index.md - - Reproduce-MLPerf-benchmarks: scripts/Reproduce-MLPerf-benchmarks/index.md - - Modular-MLPerf-training-benchmark-pipeline: scripts/Modular-MLPerf-training-benchmark-pipeline/index.md - - DevOps-automation: scripts/DevOps-automation/index.md - - Docker-automation: scripts/Docker-automation/index.md - - AI-ML-optimization: scripts/AI-ML-optimization/index.md - - AI-ML-models: scripts/AI-ML-models/index.md - - CM-automation: scripts/CM-automation/index.md - - TinyML-automation: scripts/TinyML-automation/index.md - - Cloud-automation: scripts/Cloud-automation/index.md - - Platform-information: scripts/Platform-information/index.md - - Detection-or-installation-of-tools-and-artifacts: scripts/Detection-or-installation-of-tools-and-artifacts/index.md - - Compiler-automation: scripts/Compiler-automation/index.md - - CM-Interface: scripts/CM-Interface/index.md - - Legacy-CK-support: scripts/Legacy-CK-support/index.md - - AI-ML-datasets: scripts/AI-ML-datasets/index.md - - CUDA-automation: scripts/CUDA-automation/index.md - - AI-ML-frameworks: scripts/AI-ML-frameworks/index.md - - Reproducibility-and-artifact-evaluation: scripts/Reproducibility-and-artifact-evaluation/index.md - - GUI: scripts/GUI/index.md - - Collective-benchmarking: scripts/Collective-benchmarking/index.md - - Tests: scripts/Tests/index.md - - Dashboard-automation: scripts/Dashboard-automation/index.md - - Remote-automation: scripts/Remote-automation/index.md - - CM-interface-prototyping: scripts/CM-interface-prototyping/index.md markdown_extensions: - pymdownx.tasklist: @@ -74,4 +42,3 @@ markdown_extensions: plugins: - search - macros - - caseinsensitivefiles diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 72233ec3c..000000000 --- a/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -cmind @ git+https://github.com/gateoverflow/cm.git@mlperf-inference#egg=cmind&subdirectory=cm -pyyaml -requests -setuptools -giturlparse -tabulate diff --git a/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md b/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md deleted file mode 100644 index 16850be95..000000000 --- a/script/app-mlperf-inference-mlcommons-cpp/CONTRIBUTING.md +++ /dev/null @@ -1,29 +0,0 @@ -## Contributing to the MLCommons - -The best way to contribute to the MLCommons is to get involved with one of our many project communities. -You find more information about getting involved with MLCommons [here](https://mlcommons.org/en/get-involved/#getting-started). 
-
-Generally we encourage people to become an MLCommons member if they wish to contribute to MLCommons projects,
-but outside pull requests are very welcome too.
-
-Regardless of whether you are a member, your organization needs to sign the MLCommons CLA.
-Please fill out this [CLA sign-up form](https://forms.gle/Ew1KkBVpyeJDuRw67) to get started.
-
-MLCommons project work is tracked with issue trackers and pull requests.
-Modify the project in your own fork and open a pull request when you want other developers
-to review what you have done and discuss the proposed changes.
-Ensure that cla-bot and other checks pass for your pull requests.
-
-## Contributing to this project
-
-Please join our [Discord server](https://discord.gg/JjWNWXKxwT)
-to learn how to use the CK technology v3 (including the MLCommons CM automation language, CK playground
-and Modular Inference Library) or to participate in collaborative developments.
-
-Thank you for your support; we look forward to collaborating with you!
-
-## Core contributors
-
-* [Thomas Zhu](https://www.linkedin.com/in/hanwen-zhu-483614189)
-* [Arjun Suresh](https://www.linkedin.com/in/arjunsuresh)
-* [Grigori Fursin](https://cKnowledge.org/gfursin)
diff --git a/script/app-mlperf-inference-mlcommons-python/README-about.md b/script/app-mlperf-inference-mlcommons-python/README-about.md
deleted file mode 100644
index 77ba7ea07..000000000
--- a/script/app-mlperf-inference-mlcommons-python/README-about.md
+++ /dev/null
@@ -1,7 +0,0 @@
-This portable CM script is being developed by the [MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/mlperf-education-workgroup.md)
-to modularize the *Python reference implementations* of the [MLPerf inference benchmark](https://github.com/mlcommons/inference)
-using the [MLCommons CM automation meta-framework](https://github.com/mlcommons/ck).
-The goal is to make it easier to run, optimize, and reproduce MLPerf benchmarks
-across diverse platforms with continuously changing software and hardware.
-
-See the current coverage of different models, devices and backends [here](README-extra.md#current-coverage).
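For reference, a minimal runnable version of the `cmind.access` pattern shown throughout the READMEs above, assuming the `cmind` package is installed; the tag string selects one of the scripts documented in this section:

```python
# Complete, runnable form of the Python usage pattern: run the
# print-python-version script and print any error on failure.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'print,python,version,python-version',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```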
diff --git a/script/create-custom-cache-entry/meta.yaml b/script/create-custom-cache-entry/meta.yaml index d7f876fae..2b78b1b9f 100644 --- a/script/create-custom-cache-entry/meta.yaml +++ b/script/create-custom-cache-entry/meta.yaml @@ -10,7 +10,7 @@ tags: - cache - entry -category: CM automation +category: MLC automation cache: true diff --git a/script/run-mlperf-inference-submission-checker/meta.yaml b/script/run-mlperf-inference-submission-checker/meta.yaml index 93faed4c9..0edef3341 100644 --- a/script/run-mlperf-inference-submission-checker/meta.yaml +++ b/script/run-mlperf-inference-submission-checker/meta.yaml @@ -16,6 +16,12 @@ deps: - inference-src - submission-checker-src tags: get,mlcommons,inference,src +- tags: pull,git,repo + env: + MLC_GIT_CHECKOUT_PATH: '<<>>' + enable_if_env: + MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES: + - 'yes' - tags: get,generic-python-lib,_xlsxwriter - names: - pyarrow @@ -43,6 +49,8 @@ input_mapping: preprocess: MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION preprocess_submission: MLC_TMP_MLPERF_INFERENCE_PREPROCESS_SUBMISSION push_to_github: MLC_MLPERF_RESULT_PUSH_TO_GITHUB + pull_changes: MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES + pull_inference_changes: MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES repo_branch: MLC_MLPERF_RESULTS_GIT_REPO_BRANCH repo_name: MLC_MLPERF_RESULTS_GIT_REPO_NAME repo_owner: MLC_MLPERF_RESULTS_GIT_REPO_OWNER
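The new `pull,git,repo` dependency runs only when `MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES` is `yes`, and the added `input_mapping` entries mean either `--pull_changes` or `--pull_inference_changes` sets that variable. A hedged invocation sketch (the tag list is assumed for illustration):

```bash
# Either flag below maps to MLC_MLPERF_INFERENCE_PULL_SRC_CHANGES=yes,
# which satisfies enable_if_env on the new pull,git,repo dependency.
mlcr run,mlperf,inference,submission,checker --pull_changes=yes
```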